From 0b76ed1b3fbb5ef044a9558051c5a1972cdfe515 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 10 Oct 2024 11:41:13 -0700 Subject: [PATCH 01/27] Update badge --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a540f002d..10ff2efc5 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ [![Latest Release](https://img.shields.io/github/v/release/NHERI-SimCenter/pelicun?color=blue&label=Latest%20Release)](https://github.com/NHERI-SimCenter/pelicun/releases/latest) ![Tests](https://github.com/NHERI-SimCenter/pelicun/actions/workflows/tests.yml/badge.svg) [![codecov](https://codecov.io/github/NHERI-SimCenter/pelicun/branch/master/graph/badge.svg?token=W79M5FGOCG)](https://codecov.io/github/NHERI-SimCenter/pelicun/tree/master) -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Ruff](https://img.shields.io/badge/ruff-linted-blue)](https://img.shields.io/badge/ruff-linted-blue) [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue)](https://raw.githubusercontent.com/NHERI-SimCenter/pelicun/master/LICENSE) ## What is it? From b459316af278c89f8899e2a3f79a8c2e7901cab6 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sun, 18 Aug 2024 16:01:45 -0700 Subject: [PATCH 02/27] Update .gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 5b7a7e797..400bdc0de 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,7 @@ __pycache__ flycheck*.py /notes/ /.ropeproject/ + +/doc_src/ +/doc/build/ +/doc/source/api_reference/_autosummary From 95bcff872a543da15843fed6f5b5482bc8fe624e Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 12 Oct 2024 10:52:49 -0700 Subject: [PATCH 03/27] Move files around and add `__init__.py` files. 
--- pelicun/__init__.py | 51 ++-------- pelicun/model/__init__.py | 78 ++++++++------- pelicun/tests/__init__.py | 33 +++++++ pelicun/tests/basic/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e1/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e2/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e3/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e4/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e5/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e6/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e7/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e8/__init__.py | 33 +++++++ pelicun/tests/dl_calculation/e9/__init__.py | 33 +++++++ .../tests/dl_calculation/other/__init__.py | 33 +++++++ .../tests/dl_calculation/other/o1/__init__.py | 33 +++++++ .../tests/dl_calculation/rulesets/__init__.py | 33 +++++++ pelicun/tests/maintenance/__init__.py | 33 +++++++ pelicun/tests/validation/__init__.py | 33 +++++++ pelicun/tests/validation/inactive/__init__.py | 33 +++++++ pelicun/tests/validation/v0/__init__.py | 33 +++++++ .../{0 => v0}/data/CMP_marginals.csv | 0 .../{0 => v0}/data/loss_functions.csv | 0 pelicun/tests/validation/{0 => v0}/readme.md | 0 .../validation/{0 => v0}/test_validation_0.py | 27 +++--- pelicun/tests/validation/v1/__init__.py | 33 +++++++ .../{1 => v1}/data/CMP_marginals.csv | 0 .../validation/{1 => v1}/data/damage_db.csv | 0 pelicun/tests/validation/{1 => v1}/readme.md | 2 +- .../validation/{1 => v1}/test_validation_1.py | 30 +++--- pelicun/tests/validation/v2/__init__.py | 33 +++++++ .../{2 => v2}/data/CMP_marginals.csv | 0 .../data/additional_consequences.csv | 0 .../{2 => v2}/data/additional_damage_db.csv | 0 .../data/additional_loss_functions.csv | 0 .../validation/{2 => v2}/data/demand_data.csv | 0 .../{2 => v2}/data/loss_functions.csv | 0 pelicun/tests/validation/{2 => v2}/readme.md | 0 .../validation/{2 => v2}/test_validation_2.py | 94 
+++++++++++++------ pelicun/tools/__init__.py | 34 +++++++ 40 files changed, 861 insertions(+), 148 deletions(-) create mode 100644 pelicun/tests/basic/__init__.py create mode 100644 pelicun/tests/dl_calculation/__init__.py create mode 100644 pelicun/tests/dl_calculation/e1/__init__.py create mode 100644 pelicun/tests/dl_calculation/e2/__init__.py create mode 100644 pelicun/tests/dl_calculation/e3/__init__.py create mode 100644 pelicun/tests/dl_calculation/e4/__init__.py create mode 100644 pelicun/tests/dl_calculation/e5/__init__.py create mode 100644 pelicun/tests/dl_calculation/e6/__init__.py create mode 100644 pelicun/tests/dl_calculation/e7/__init__.py create mode 100644 pelicun/tests/dl_calculation/e8/__init__.py create mode 100644 pelicun/tests/dl_calculation/e9/__init__.py create mode 100644 pelicun/tests/dl_calculation/other/__init__.py create mode 100644 pelicun/tests/dl_calculation/other/o1/__init__.py create mode 100644 pelicun/tests/dl_calculation/rulesets/__init__.py create mode 100644 pelicun/tests/maintenance/__init__.py create mode 100644 pelicun/tests/validation/__init__.py create mode 100644 pelicun/tests/validation/inactive/__init__.py create mode 100644 pelicun/tests/validation/v0/__init__.py rename pelicun/tests/validation/{0 => v0}/data/CMP_marginals.csv (100%) rename pelicun/tests/validation/{0 => v0}/data/loss_functions.csv (100%) rename pelicun/tests/validation/{0 => v0}/readme.md (100%) rename pelicun/tests/validation/{0 => v0}/test_validation_0.py (86%) create mode 100644 pelicun/tests/validation/v1/__init__.py rename pelicun/tests/validation/{1 => v1}/data/CMP_marginals.csv (100%) rename pelicun/tests/validation/{1 => v1}/data/damage_db.csv (100%) rename pelicun/tests/validation/{1 => v1}/readme.md (90%) rename pelicun/tests/validation/{1 => v1}/test_validation_1.py (86%) create mode 100644 pelicun/tests/validation/v2/__init__.py rename pelicun/tests/validation/{2 => v2}/data/CMP_marginals.csv (100%) rename pelicun/tests/validation/{2 => 
v2}/data/additional_consequences.csv (100%) rename pelicun/tests/validation/{2 => v2}/data/additional_damage_db.csv (100%) rename pelicun/tests/validation/{2 => v2}/data/additional_loss_functions.csv (100%) rename pelicun/tests/validation/{2 => v2}/data/demand_data.csv (100%) rename pelicun/tests/validation/{2 => v2}/data/loss_functions.csv (100%) rename pelicun/tests/validation/{2 => v2}/readme.md (100%) rename pelicun/tests/validation/{2 => v2}/test_validation_2.py (56%) create mode 100644 pelicun/tools/__init__.py diff --git a/pelicun/__init__.py b/pelicun/__init__.py index ff1bb80c8..6f8cdc995 100644 --- a/pelicun/__init__.py +++ b/pelicun/__init__.py @@ -1,52 +1,13 @@ -""" --*- coding: utf-8 -*- +"""Main public package.""" -Copyright (c) 2018 Leland Stanford Junior University -Copyright (c) 2018 The Regents of the University of California - -This file is part of pelicun. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -You should have received a copy of the BSD 3-Clause License along with -pelicun. If not, see . - -Contributors: -Adam Zsarnóczay -""" - -name = "pelicun" +name = 'pelicun' __version__ = '3.3.3' __copyright__ = ( - "Copyright (c) 2018 Leland Stanford " - "Junior University and The Regents " - "of the University of California" + 'Copyright (c) 2018 Leland Stanford ' + 'Junior University and The Regents ' + 'of the University of California' ) -__license__ = "BSD 3-Clause License" +__license__ = 'BSD 3-Clause License' diff --git a/pelicun/model/__init__.py b/pelicun/model/__init__.py index f4734f486..d410a1d17 100644 --- a/pelicun/model/__init__.py +++ b/pelicun/model/__init__.py @@ -1,52 +1,48 @@ -""" --*- coding: utf-8 -*- +# +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California -Copyright (c) 2018 Leland Stanford Junior University -Copyright (c) 2018 The Regents of the University of California +# This file is part of pelicun. -This file is part of pelicun. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. -1. 
Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. -3. Neither the name of the copyright holder nor the names of its contributors -may be used to endorse or promote products derived from this software without -specific prior written permission. +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . -You should have received a copy of the BSD 3-Clause License along with -pelicun. If not, see . - -Contributors: -Adam Zsarnóczay -""" - -# flake8: noqa +"""Pelicun model.""" from __future__ import annotations -from pelicun.model.pelicun_model import PelicunModel -from pelicun.model.demand_model import DemandModel + from pelicun.model.asset_model import AssetModel -from pelicun.model.damage_model import DamageModel -from pelicun.model.damage_model import DamageModel_DS -from pelicun.model.loss_model import LossModel -from pelicun.model.loss_model import RepairModel_DS -from pelicun.model.loss_model import RepairModel_LF +from pelicun.model.damage_model import DamageModel, DamageModel_DS +from pelicun.model.demand_model import DemandModel +from pelicun.model.loss_model import ( + LossModel, + RepairModel_DS, + RepairModel_LF, +) +from pelicun.model.pelicun_model import PelicunModel diff --git a/pelicun/tests/__init__.py b/pelicun/tests/__init__.py index e69de29bb..72c332008 100644 --- a/pelicun/tests/__init__.py +++ b/pelicun/tests/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/basic/__init__.py b/pelicun/tests/basic/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/basic/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/__init__.py b/pelicun/tests/dl_calculation/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e1/__init__.py b/pelicun/tests/dl_calculation/e1/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e1/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e2/__init__.py b/pelicun/tests/dl_calculation/e2/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e2/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e3/__init__.py b/pelicun/tests/dl_calculation/e3/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e3/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e4/__init__.py b/pelicun/tests/dl_calculation/e4/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e4/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e5/__init__.py b/pelicun/tests/dl_calculation/e5/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e5/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e6/__init__.py b/pelicun/tests/dl_calculation/e6/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e6/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e7/__init__.py b/pelicun/tests/dl_calculation/e7/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e7/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e8/__init__.py b/pelicun/tests/dl_calculation/e8/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e8/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/e9/__init__.py b/pelicun/tests/dl_calculation/e9/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/e9/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . 
diff --git a/pelicun/tests/dl_calculation/other/__init__.py b/pelicun/tests/dl_calculation/other/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/other/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. 
If not, see . diff --git a/pelicun/tests/dl_calculation/other/o1/__init__.py b/pelicun/tests/dl_calculation/other/o1/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/other/o1/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/dl_calculation/rulesets/__init__.py b/pelicun/tests/dl_calculation/rulesets/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/dl_calculation/rulesets/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/maintenance/__init__.py b/pelicun/tests/maintenance/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/maintenance/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/validation/__init__.py b/pelicun/tests/validation/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/validation/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/validation/inactive/__init__.py b/pelicun/tests/validation/inactive/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/validation/inactive/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/validation/v0/__init__.py b/pelicun/tests/validation/v0/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/validation/v0/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/validation/0/data/CMP_marginals.csv b/pelicun/tests/validation/v0/data/CMP_marginals.csv similarity index 100% rename from pelicun/tests/validation/0/data/CMP_marginals.csv rename to pelicun/tests/validation/v0/data/CMP_marginals.csv diff --git a/pelicun/tests/validation/0/data/loss_functions.csv b/pelicun/tests/validation/v0/data/loss_functions.csv similarity index 100% rename from pelicun/tests/validation/0/data/loss_functions.csv rename to pelicun/tests/validation/v0/data/loss_functions.csv diff --git a/pelicun/tests/validation/0/readme.md b/pelicun/tests/validation/v0/readme.md similarity index 100% rename from pelicun/tests/validation/0/readme.md rename to pelicun/tests/validation/v0/readme.md diff --git a/pelicun/tests/validation/0/test_validation_0.py b/pelicun/tests/validation/v0/test_validation_0.py similarity index 86% rename from pelicun/tests/validation/0/test_validation_0.py rename to pelicun/tests/validation/v0/test_validation_0.py index c035104b4..c43ab719e 100644 --- a/pelicun/tests/validation/0/test_validation_0.py +++ 
b/pelicun/tests/validation/v0/test_validation_0.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,10 +32,6 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -# -# Contributors: -# Adam Zsarnóczay -# John Vouvakis Manousakis """ Validation test on loss functions. @@ -49,18 +44,18 @@ """ from __future__ import annotations + import numpy as np import pandas as pd -import pelicun -from pelicun import assessment +from pelicun import assessment, file_io -def test_validation_loss_function(): +def test_validation_loss_function() -> None: sample_size = 100000 # initialize a pelicun assessment - asmnt = assessment.Assessment({"PrintLog": False, "Seed": 42}) + asmnt = assessment.Assessment({'PrintLog': False, 'Seed': 42}) # # Demands @@ -82,7 +77,7 @@ def test_validation_loss_function(): asmnt.demand.load_model({'marginals': demands}) - asmnt.demand.generate_sample({"SampleSize": sample_size}) + asmnt.demand.generate_sample({'SampleSize': sample_size}) # # Asset @@ -91,7 +86,7 @@ def test_validation_loss_function(): asmnt.stories = 1 cmp_marginals = pd.read_csv( - 'pelicun/tests/validation/0/data/CMP_marginals.csv', index_col=0 + 'pelicun/tests/validation/v0/data/CMP_marginals.csv', index_col=0 ) cmp_marginals['Blocks'] = cmp_marginals['Blocks'] asmnt.asset.load_cmp_model({'marginals': cmp_marginals}) @@ -113,23 +108,25 @@ def test_validation_loss_function(): loss_map = pd.DataFrame(['cmp.A'], columns=['Repair'], index=['cmp.A']) asmnt.loss.add_loss_map(loss_map) - loss_functions = pelicun.file_io.load_data( - 'pelicun/tests/validation/0/data/loss_functions.csv', + loss_functions = file_io.load_data( + 'pelicun/tests/validation/v0/data/loss_functions.csv', reindex=False, unit_conversion_factors=asmnt.unit_conversion_factors, ) + assert isinstance(loss_functions, pd.DataFrame) 
asmnt.loss.load_model_parameters([loss_functions]) asmnt.loss.calculate() loss, _ = asmnt.loss.aggregate_losses(future=True) + assert isinstance(loss, pd.DataFrame) - loss_vals = loss['repair_cost'].values + loss_vals = loss['repair_cost'].to_numpy() # sample median should be close to 0.05 assert np.allclose(np.median(loss_vals), 0.05, atol=1e-2) # dispersion should be close to 0.9 assert np.allclose(np.log(loss_vals).std(), 0.90, atol=1e-2) - # # TODO also test save/load sample + # TODO(JVM): also test save/load sample # asmnt.loss.save_sample('/tmp/sample.csv') # asmnt.loss.load_sample('/tmp/sample.csv') diff --git a/pelicun/tests/validation/v1/__init__.py b/pelicun/tests/validation/v1/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/validation/v1/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . diff --git a/pelicun/tests/validation/1/data/CMP_marginals.csv b/pelicun/tests/validation/v1/data/CMP_marginals.csv similarity index 100% rename from pelicun/tests/validation/1/data/CMP_marginals.csv rename to pelicun/tests/validation/v1/data/CMP_marginals.csv diff --git a/pelicun/tests/validation/1/data/damage_db.csv b/pelicun/tests/validation/v1/data/damage_db.csv similarity index 100% rename from pelicun/tests/validation/1/data/damage_db.csv rename to pelicun/tests/validation/v1/data/damage_db.csv diff --git a/pelicun/tests/validation/1/readme.md b/pelicun/tests/validation/v1/readme.md similarity index 90% rename from pelicun/tests/validation/1/readme.md rename to pelicun/tests/validation/v1/readme.md index 19d3a511e..396db6f71 100644 --- a/pelicun/tests/validation/1/readme.md +++ b/pelicun/tests/validation/v1/readme.md @@ -17,4 +17,4 @@ If $\mathrm{Y} \sim \textrm{LogNormal}(\delta, \beta)$, then $\mathrm{X} = \log ``` where $\Phi$ is the cumulative distribution function of the standard normal distribution, $\delta_{C1}$, $\delta_{C2}$, $\beta_{C1}$, $\beta_{C2}$ are the medians and dispersions of the fragility curve capacities, and $\delta_{D}$, $\beta_{D}$ is the median and dispersion of the EDP demand. 
-The equations inherently asume that the capacity RVs for the damage states are perfectly correlated, which is the case for sequential damage states. +The equations inherently assume that the capacity RVs for the damage states are perfectly correlated, which is the case for sequential damage states. diff --git a/pelicun/tests/validation/1/test_validation_1.py b/pelicun/tests/validation/v1/test_validation_1.py similarity index 86% rename from pelicun/tests/validation/1/test_validation_1.py rename to pelicun/tests/validation/v1/test_validation_1.py index aa22954e1..0b94b8598 100644 --- a/pelicun/tests/validation/1/test_validation_1.py +++ b/pelicun/tests/validation/v1/test_validation_1.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,10 +32,6 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -# -# Contributors: -# Adam Zsarnóczay -# John Vouvakis Manousakis """ Validation test for the probability of each damage state of a @@ -45,19 +40,20 @@ """ from __future__ import annotations + import tempfile + import numpy as np import pandas as pd -import pelicun -from pelicun import assessment from scipy.stats import norm # type: ignore +from pelicun import assessment, file_io -def test_validation_ds_probabilities(): +def test_validation_ds_probabilities() -> None: sample_size = 1000000 - asmnt = assessment.Assessment({"PrintLog": False, "Seed": 42}) + asmnt = assessment.Assessment({'PrintLog': False, 'Seed': 42}) # # Demands @@ -81,7 +77,7 @@ def test_validation_ds_probabilities(): asmnt.demand.load_model({'marginals': demands}) # generate samples - asmnt.demand.generate_sample({"SampleSize": sample_size}) + asmnt.demand.generate_sample({'SampleSize': sample_size}) # # Asset @@ -92,7 +88,7 @@ def test_validation_ds_probabilities(): # load component definitions cmp_marginals = pd.read_csv( - 
'pelicun/tests/validation/1/data/CMP_marginals.csv', index_col=0 + 'pelicun/tests/validation/v1/data/CMP_marginals.csv', index_col=0 ) cmp_marginals['Blocks'] = cmp_marginals['Blocks'] asmnt.asset.load_cmp_model({'marginals': cmp_marginals}) @@ -104,11 +100,12 @@ def test_validation_ds_probabilities(): # Damage # - damage_db = pelicun.file_io.load_data( - 'pelicun/tests/validation/1/data/damage_db.csv', + damage_db = file_io.load_data( + 'pelicun/tests/validation/v1/data/damage_db.csv', reindex=False, unit_conversion_factors=asmnt.unit_conversion_factors, ) + assert isinstance(damage_db, pd.DataFrame) cmp_set = set(asmnt.asset.list_unique_component_ids()) @@ -150,14 +147,15 @@ def test_validation_ds_probabilities(): (demand_mean - capacity_2_mean) / np.sqrt(demand_std**2 + capacity_std**2) ) - assert np.allclose((probs[0]).values, p0, atol=1e-2) - assert np.allclose((probs[1]).values, p1, atol=1e-2) - assert np.allclose((probs[2]).values, p2, atol=1e-2) + assert np.allclose(probs.iloc[0, 0], p0, atol=1e-2) # type: ignore + assert np.allclose(probs.iloc[0, 1], p1, atol=1e-2) # type: ignore + assert np.allclose(probs.iloc[0, 2], p2, atol=1e-2) # type: ignore # # Also test load/save sample # + assert asmnt.damage.ds_model.sample is not None asmnt.damage.ds_model.sample = asmnt.damage.ds_model.sample.iloc[0:100, :] # (we reduce the number of realizations to conserve resources) before = asmnt.damage.ds_model.sample.copy() diff --git a/pelicun/tests/validation/v2/__init__.py b/pelicun/tests/validation/v2/__init__.py new file mode 100644 index 000000000..72c332008 --- /dev/null +++ b/pelicun/tests/validation/v2/__init__.py @@ -0,0 +1,33 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . 
diff --git a/pelicun/tests/validation/2/data/CMP_marginals.csv b/pelicun/tests/validation/v2/data/CMP_marginals.csv similarity index 100% rename from pelicun/tests/validation/2/data/CMP_marginals.csv rename to pelicun/tests/validation/v2/data/CMP_marginals.csv diff --git a/pelicun/tests/validation/2/data/additional_consequences.csv b/pelicun/tests/validation/v2/data/additional_consequences.csv similarity index 100% rename from pelicun/tests/validation/2/data/additional_consequences.csv rename to pelicun/tests/validation/v2/data/additional_consequences.csv diff --git a/pelicun/tests/validation/2/data/additional_damage_db.csv b/pelicun/tests/validation/v2/data/additional_damage_db.csv similarity index 100% rename from pelicun/tests/validation/2/data/additional_damage_db.csv rename to pelicun/tests/validation/v2/data/additional_damage_db.csv diff --git a/pelicun/tests/validation/2/data/additional_loss_functions.csv b/pelicun/tests/validation/v2/data/additional_loss_functions.csv similarity index 100% rename from pelicun/tests/validation/2/data/additional_loss_functions.csv rename to pelicun/tests/validation/v2/data/additional_loss_functions.csv diff --git a/pelicun/tests/validation/2/data/demand_data.csv b/pelicun/tests/validation/v2/data/demand_data.csv similarity index 100% rename from pelicun/tests/validation/2/data/demand_data.csv rename to pelicun/tests/validation/v2/data/demand_data.csv diff --git a/pelicun/tests/validation/2/data/loss_functions.csv b/pelicun/tests/validation/v2/data/loss_functions.csv similarity index 100% rename from pelicun/tests/validation/2/data/loss_functions.csv rename to pelicun/tests/validation/v2/data/loss_functions.csv diff --git a/pelicun/tests/validation/2/readme.md b/pelicun/tests/validation/v2/readme.md similarity index 100% rename from pelicun/tests/validation/2/readme.md rename to pelicun/tests/validation/v2/readme.md diff --git a/pelicun/tests/validation/2/test_validation_2.py b/pelicun/tests/validation/v2/test_validation_2.py 
similarity index 56% rename from pelicun/tests/validation/2/test_validation_2.py rename to pelicun/tests/validation/v2/test_validation_2.py index e897b3638..1c6e0e9ec 100644 --- a/pelicun/tests/validation/2/test_validation_2.py +++ b/pelicun/tests/validation/v2/test_validation_2.py @@ -1,3 +1,37 @@ +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + """ Tests a complete loss estimation workflow combining damage state and loss function driven components. @@ -6,24 +40,24 @@ """ import tempfile + import numpy as np import pandas as pd import pytest + import pelicun +from pelicun import assessment, file_io from pelicun.warnings import PelicunWarning -from pelicun import file_io -from pelicun import assessment -def test_combined_workflow(): - +def test_combined_workflow() -> None: temp_dir = tempfile.mkdtemp() sample_size = 10000 # Initialize a pelicun assessment asmnt = assessment.Assessment( - {"PrintLog": True, "Seed": 415, "LogFile": f'{temp_dir}/log_file.txt'} + {'PrintLog': True, 'Seed': 415, 'LogFile': f'{temp_dir}/log_file.txt'} ) asmnt.options.list_all_ds = True @@ -31,13 +65,15 @@ def test_combined_workflow(): asmnt.options.eco_scale['AcrossDamageStates'] = True demand_data = file_io.load_data( - 'pelicun/tests/validation/2/data/demand_data.csv', + 'pelicun/tests/validation/v2/data/demand_data.csv', unit_conversion_factors=None, reindex=False, ) ndims = len(demand_data) perfect_correlation = pd.DataFrame( - np.ones((ndims, ndims)), columns=demand_data.index, index=demand_data.index + np.ones((ndims, ndims)), + columns=demand_data.index, # type: ignore + index=demand_data.index, # type: ignore ) # @@ -45,12 +81,12 @@ def test_combined_workflow(): # damage_db = pelicun.file_io.load_data( - 'pelicun/tests/validation/2/data/additional_damage_db.csv', + 'pelicun/tests/validation/v2/data/additional_damage_db.csv', reindex=False, unit_conversion_factors=asmnt.unit_conversion_factors, ) consequences = pelicun.file_io.load_data( - 'pelicun/tests/validation/2/data/additional_consequences.csv', + 'pelicun/tests/validation/v2/data/additional_consequences.csv', reindex=False, unit_conversion_factors=asmnt.unit_conversion_factors, ) @@ -60,7 +96,7 @@ def test_combined_workflow(): # loss_functions = 
pelicun.file_io.load_data( - 'pelicun/tests/validation/2/data/additional_loss_functions.csv', + 'pelicun/tests/validation/v2/data/additional_loss_functions.csv', reindex=False, unit_conversion_factors=asmnt.unit_conversion_factors, ) @@ -75,34 +111,31 @@ def test_combined_workflow(): ) # Generate samples - asmnt.demand.generate_sample({"SampleSize": sample_size}) - - def add_more_edps(): - """ - Adds SA_1.13 and residual drift to the demand sample. + asmnt.demand.generate_sample({'SampleSize': sample_size}) - """ + def add_more_edps() -> None: + """Adds SA_1.13 and residual drift to the demand sample.""" # Add residual drift and Sa - demand_sample, demand_units = asmnt.demand.save_sample(save_units=True) + demand_sample = asmnt.demand.save_sample() # RIDs are all fixed for testing. - RID = pd.concat( + rid = pd.concat( [ pd.DataFrame( - np.full(demand_sample['PID'].shape, 0.0050), - index=demand_sample['PID'].index, - columns=demand_sample['PID'].columns, + np.full(demand_sample['PID'].shape, 0.0050), # type: ignore + index=demand_sample['PID'].index, # type: ignore + columns=demand_sample['PID'].columns, # type: ignore ) ], axis=1, keys=['RID'], ) - demand_sample_ext = pd.concat([demand_sample, RID], axis=1) + demand_sample_ext = pd.concat([demand_sample, rid], axis=1) # type: ignore demand_sample_ext[('SA_1.13', 0, 1)] = 1.50 # Add units to the data - demand_sample_ext.T.insert(0, 'Units', "") + demand_sample_ext.T.insert(0, 'Units', '') # PFA and SA are in "g" in this example, while PID and RID are "rad" demand_sample_ext.loc['Units', ['PFA', 'SA_1.13']] = 'g' @@ -121,7 +154,7 @@ def add_more_edps(): # Load component definitions cmp_marginals = pd.read_csv( - 'pelicun/tests/validation/2/data/CMP_marginals.csv', index_col=0 + 'pelicun/tests/validation/v2/data/CMP_marginals.csv', index_col=0 ) cmp_marginals['Blocks'] = cmp_marginals['Blocks'] asmnt.asset.load_cmp_model({'marginals': cmp_marginals}) @@ -145,7 +178,7 @@ def add_more_edps(): # Load the models into 
pelicun asmnt.damage.load_model_parameters( [ - damage_db, + damage_db, # type: ignore 'PelicunDefault/damage_DB_FEMA_P58_2nd.csv', ], cmp_set, @@ -153,8 +186,8 @@ def add_more_edps(): # Prescribe the damage process dmg_process = { - "1_collapse": {"DS1": "ALL_NA"}, - "2_excessiveRID": {"DS1": "irreparable_DS1"}, + '1_collapse': {'DS1': 'ALL_NA'}, + '2_excessiveRID': {'DS1': 'irreparable_DS1'}, } # Calculate damages @@ -165,6 +198,7 @@ def add_more_edps(): asmnt.damage.save_sample(f'{temp_dir}/out.csv') asmnt.damage.load_sample(f'{temp_dir}/out.csv') + assert asmnt.damage.ds_model.sample is not None asmnt.damage.ds_model.sample.mean() # @@ -184,9 +218,9 @@ def add_more_edps(): with pytest.warns(PelicunWarning): asmnt.loss.load_model_parameters( [ - consequences, - loss_functions, - "PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv", + consequences, # type: ignore + loss_functions, # type: ignore + 'PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv', ] ) diff --git a/pelicun/tools/__init__.py b/pelicun/tools/__init__.py new file mode 100644 index 000000000..cf08aa216 --- /dev/null +++ b/pelicun/tools/__init__.py @@ -0,0 +1,34 @@ +# noqa: D104 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . From 40e7c6236a17ad638d5389b3f0db76b0047db071 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Sat, 12 Oct 2024 10:54:15 -0700 Subject: [PATCH 04/27] Update configuration files. 
--- .flake8 | 4 - .pylintrc | 574 ----------------------------------------------- ignore_words.txt | 2 + mypy.ini | 3 - pyproject.toml | 39 +++- run_checks.sh | 41 ++-- setup.py | 2 + 7 files changed, 61 insertions(+), 604 deletions(-) delete mode 100644 .flake8 delete mode 100644 .pylintrc create mode 100644 ignore_words.txt delete mode 100644 mypy.ini diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 54d60194d..000000000 --- a/.flake8 +++ /dev/null @@ -1,4 +0,0 @@ -[flake8] -max-line-length = 85 -ignore = E203, E241, E701, W503 -exclude = flycheck* \ No newline at end of file diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 988bb59d2..000000000 --- a/.pylintrc +++ /dev/null @@ -1,574 +0,0 @@ -[MAIN] - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -init-hook='import sys; sys.path.append("."); sys.path.append("../"); sys.path.append("../../")' - -# Files or directories to be skipped. They should be base names, not -# paths. -ignore=flycheck_* - -# Add files or directories matching the regex patterns to the ignore-list. The -# regex matches against paths and can be in Posix or Windows format. -ignore-paths=rulesets - -# Files or directories matching the regex patterns are skipped. The regex -# matches against base names, not paths. -ignore-patterns=^\.# - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - pylint.extensions.check_elif, - pylint.extensions.bad_builtin, - pylint.extensions.for_any_all, - pylint.extensions.set_membership, - pylint.extensions.code_style, - pylint.extensions.overlapping_exceptions, - pylint.extensions.typing, - pylint.extensions.redefined_variable_type, - pylint.extensions.comparison_placement, - pylint.extensions.docparams - -# Use multiple processes to speed up Pylint. 
Specifying 0 will auto-detect the -# number of processors available to use. -jobs=0 - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages. -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-allow-list= - -# Minimum supported python version -py-version = 3.7.2 - -# Control the amount of potential inferred values when inferring a single -# object. This can help the performance when dealing with large functions or -# complex, nested conditions. -limit-inference-results=100 - -# Specify a score threshold under which the program will exit with error. -fail-under=10.0 - -# Return non-zero exit code if any of these messages/categories are detected, -# even if score is above --fail-under value. Syntax same as enable. Messages -# specified are enabled, while categories only check already-enabled messages. -fail-on= - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -# confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable= - use-symbolic-message-instead, - useless-suppression, - -# Disable the message, report, category or checker with the given id(s). 
You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then re-enable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" - -disable= - attribute-defined-outside-init, - invalid-name, - missing-param-doc, - missing-type-doc, - protected-access, - too-few-public-methods, - # handled by black - format, - # We anticipate #3512 where it will become optional - fixme, - arguments-differ, - arguments-renamed, - else-if-used, - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages -reports=no - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention' -# and 'info', which contain the number of messages in each category, as -# well as 'statement', which is the total number of statements analyzed. This -# score is used by the global evaluation report (RP0004). -evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - -# Activate the evaluation score. 
-score=yes - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - -# The type of string formatting that logging methods do. `old` means using % -# formatting, `new` is for `{}` formatting. -logging-format-style=old - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO,todo,debug - -# Regular expression of note tags to take in consideration. -#notes-rgx= - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=10 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=yes - -# Signatures are removed from the similarity computation -ignore-signatures=yes - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_$|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of names allowed to shadow builtins -allowed-redefined-builtins= - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore. -ignored-argument-names=_.* - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io - - -[FORMAT] - -# Maximum number of characters on a single line. 
-max-line-length=85 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Maximum number of lines in a module -max-module-lines=2000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_ - -# Good variable names regexes, separated by a comma. If names match any regex, -# they will always be accepted -good-names-rgxs= - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Bad variable names regexes, separated by a comma. If names match any regex, -# they will always be refused -bad-names-rgxs= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Naming style matching correct function names. -function-naming-style=snake_case - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming style matching correct variable names. -variable-naming-style=snake_case - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming style matching correct constant names. 
-const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming style matching correct attribute names. -attr-naming-style=snake_case - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,}$ - -# Naming style matching correct argument names. -argument-naming-style=snake_case - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Naming style matching correct class attribute names. -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming style matching correct class constant names. -class-const-naming-style=UPPER_CASE - -# Regular expression matching correct class constant names. Overrides class- -# const-naming-style. -#class-const-rgx= - -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming style matching correct class names. -class-naming-style=PascalCase - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - - -# Naming style matching correct module names. -module-naming-style=snake_case - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - - -# Naming style matching correct method names. -method-naming-style=snake_case - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,}$ - -# Regular expression matching correct type variable names -#typevar-rgx= - -# Regular expression which should only match function or class names that do -# not require a docstring. Use ^(?!__init__$)_ to also check __init__. -no-docstring-rgx=((^test_)|(^_.*(? 
Date: Sat, 12 Oct 2024 10:56:51 -0700 Subject: [PATCH 05/27] Linting updates --- README.md | 6 +- pelicun/__init__.py | 39 +- pelicun/assessment.py | 887 ++++++++-------- pelicun/auto.py | 75 +- pelicun/base.py | 743 +++++++------ pelicun/file_io.py | 142 +-- pelicun/model/__init__.py | 2 +- pelicun/model/asset_model.py | 169 ++- pelicun/model/damage_model.py | 602 ++++++----- pelicun/model/demand_model.py | 517 ++++----- pelicun/model/loss_model.py | 994 +++++++++--------- pelicun/model/pelicun_model.py | 100 +- pelicun/{warnings.py => pelicun_warnings.py} | 25 +- .../SimCenterDBDL/damage_DB_FEMA_P58_2nd.json | 146 +-- .../damage_DB_Hazus_EQ_bldg.json | 18 +- .../damage_DB_Hazus_EQ_story.json | 6 +- .../loss_repair_DB_FEMA_P58_2nd.json | 146 +-- pelicun/resources/auto/Hazus_Earthquake_IM.py | 732 ++++++------- .../resources/auto/Hazus_Earthquake_Story.py | 138 ++- pelicun/settings/default_units.json | 2 +- pelicun/tests/__init__.py | 2 + .../data/base/test_parse_units/duplicate.json | 2 +- .../base/test_parse_units/duplicate2.json | 2 +- pelicun/tests/basic/reset_tests.py | 40 +- pelicun/tests/basic/test_assessment.py | 31 +- pelicun/tests/basic/test_asset_model.py | 68 +- pelicun/tests/basic/test_auto.py | 49 +- pelicun/tests/basic/test_base.py | 425 ++++---- pelicun/tests/basic/test_damage_model.py | 257 +++-- pelicun/tests/basic/test_demand_model.py | 312 +++--- pelicun/tests/basic/test_file_io.py | 116 +- pelicun/tests/basic/test_loss_model.py | 376 ++++--- pelicun/tests/basic/test_model.py | 21 +- pelicun/tests/basic/test_pelicun_model.py | 26 +- pelicun/tests/basic/test_uq.py | 585 ++++++----- pelicun/tests/code_repetition_checker.py | 31 +- pelicun/tests/dl_calculation/e1/test_e1.py | 68 +- pelicun/tests/dl_calculation/e2/test_e2.py | 67 +- pelicun/tests/dl_calculation/e3/test_e3.py | 67 +- pelicun/tests/dl_calculation/e4/test_e4.py | 67 +- pelicun/tests/dl_calculation/e5/test_e5.py | 67 +- pelicun/tests/dl_calculation/e6/test_e6.py | 67 +- 
pelicun/tests/dl_calculation/e7/auto_HU_NJ.py | 112 +- pelicun/tests/dl_calculation/e7/test_e7.py | 77 +- pelicun/tests/dl_calculation/e8/auto_HU_LA.py | 99 +- pelicun/tests/dl_calculation/e8/test_e8.py | 78 +- pelicun/tests/dl_calculation/e9/custom_pop.py | 98 +- pelicun/tests/dl_calculation/e9/test_e9.py | 79 +- .../tests/dl_calculation/other/o1/run_o1.py | 39 +- .../rulesets/BldgClassRulesets.py | 95 +- .../rulesets/BuildingClassRulesets.py | 96 +- .../rulesets/FloodAssmRulesets.py | 37 +- .../rulesets/FloodClassRulesets.py | 150 +-- .../dl_calculation/rulesets/FloodRulesets.py | 150 +-- .../rulesets/MetaVarRulesets.py | 252 ++--- .../rulesets/WindCECBRulesets.py | 83 +- .../rulesets/WindCERBRulesets.py | 83 +- .../dl_calculation/rulesets/WindEFRulesets.py | 238 ++--- .../rulesets/WindMECBRulesets.py | 97 +- .../rulesets/WindMERBRulesets.py | 97 +- .../dl_calculation/rulesets/WindMHRulesets.py | 57 +- .../rulesets/WindMLRIRulesets.py | 78 +- .../rulesets/WindMLRMRulesets.py | 181 ++-- .../rulesets/WindMMUHRulesets.py | 130 ++- .../rulesets/WindMSFRulesets.py | 171 ++- .../rulesets/WindMetaVarRulesets.py | 270 ++--- .../rulesets/WindSECBRulesets.py | 93 +- .../rulesets/WindSERBRulesets.py | 93 +- .../rulesets/WindSPMBRulesets.py | 56 +- .../rulesets/WindWMUHRulesets.py | 168 ++- .../rulesets/WindWSFRulesets.py | 185 ++-- .../tests/maintenance/search_in_functions.py | 59 +- pelicun/tests/util.py | 33 +- .../validation/inactive/3d_interpolation.py | 40 +- .../inactive/pandas_convert_speed.py | 92 +- pelicun/tests/validation/inactive/readme.md | 2 +- .../tests/validation/v2/test_validation_2.py | 4 +- pelicun/tools/DL_calculation.py | 540 +++++----- pelicun/tools/HDF_to_CSV.py | 23 +- pelicun/uq.py | 788 +++++++------- pytest.ini | 1 + run_checks.sh | 7 +- setup.py | 65 +- 83 files changed, 6887 insertions(+), 6444 deletions(-) rename pelicun/{warnings.py => pelicun_warnings.py} (87%) diff --git a/README.md b/README.md index 10ff2efc5..1f2d06ee3 100644 --- 
a/README.md +++ b/README.md @@ -124,9 +124,9 @@ Feel free to [open an issue](https://github.com/NHERI-SimCenter/pelicun/issues/n - **Location-specific damage processes**: This new feature is useful when you want damage to a component type to induce damage in another component type at the same location only. For example, damaged water pipes on a specific story can trigger damage in floor covering only on that specific story. Location-matching is performed automatically without you having to define component pairs for every location using the following syntax: `'1_CMP.A-LOC', {'DS1': 'CMP.B_DS1'}` , where DS1 of `CMP.A` at each location triggers DS1 of `CMP.B` at the same location. - - **New `custom_model_dir` argument for `DL_calculation`**: This argument allows users to prepare custom damage and loss model files in a folder and pass the path to that folder to an auto-population script through `DL_calculation`. Within the auto-population script, they can reference only the name of the files in that folder. This provides portability for simulations that use custom models and auto population, such as some of the advanced regional simualtions in [SimCenter's R2D Tool](https://simcenter.designsafe-ci.org/research-tools/r2dtool/). + - **New `custom_model_dir` argument for `DL_calculation`**: This argument allows users to prepare custom damage and loss model files in a folder and pass the path to that folder to an auto-population script through `DL_calculation`. Within the auto-population script, they can reference only the name of the files in that folder. This provides portability for simulations that use custom models and auto population, such as some of the advanced regional simulations in [SimCenter's R2D Tool](https://simcenter.designsafe-ci.org/research-tools/r2dtool/). - - **Extend Hazus EQ auto population sripts to include water networks**: Automatically recognize water network assets and map them to archetypes from the Hazus Earthquake technical manual. 
+ - **Extend Hazus EQ auto population scripts to include water networks**: Automatically recognize water network assets and map them to archetypes from the Hazus Earthquake technical manual. - **Introduce `convert_units` function**: Provide streamlined unit conversion using the pre-defined library of units in Pelicun. Allows you to convert a variable from one unit to another using a single line of simple code, such as `converted_height = pelicun.base.convert_units(raw_height, unit='m', to_unit='ft')` @@ -140,7 +140,7 @@ Feel free to [open an issue](https://github.com/NHERI-SimCenter/pelicun/issues/n - **Automatic code formatting**: Further improve consistency in coding style by using [black](https://black.readthedocs.io/en/stable/) to review and format the code when needed. - - **Remove `bldg` from variable and class names**: Following the changes mentioned earlier, we dropped `bldg` from lables where the functionality is no longer limited to buildings. + - **Remove `bldg` from variable and class names**: Following the changes mentioned earlier, we dropped `bldg` from labels where the functionality is no longer limited to buildings. - **Introduce `calibrated` attribute for demand model**: This new attribute will allow users to check if a model has already been calibrated to the provided empirical data. diff --git a/pelicun/__init__.py b/pelicun/__init__.py index 6f8cdc995..3b8d6118f 100644 --- a/pelicun/__init__.py +++ b/pelicun/__init__.py @@ -1,4 +1,41 @@ -"""Main public package.""" +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California + +# This file is part of pelicun. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. + +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +# Contributors: +# Adam Zsarnóczay + +"""Pelicun library.""" name = 'pelicun' diff --git a/pelicun/assessment.py b/pelicun/assessment.py index 271fcc1c0..db9d318af 100644 --- a/pelicun/assessment.py +++ b/pelicun/assessment.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,29 +37,23 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This module has classes and methods that control the performance assessment. 
-""" +"""Classes and methods that control the performance assessment.""" from __future__ import annotations -from typing import Any + import json +from pathlib import Path +from typing import Any + import numpy as np import pandas as pd -from pelicun.base import get -from pelicun import base -from pelicun import uq -from pelicun import file_io -from pelicun import model -from pelicun.base import EDP_to_demand_type +from pelicun import base, file_io, model, uq from pelicun.__init__ import __version__ as pelicun_version # type: ignore +from pelicun.base import EDP_to_demand_type, get - -# pylint: disable=consider-using-namedtuple-or-dataclass - -default_DBs = { +default_dbs = { 'fragility': { 'FEMA P-58': 'damage_DB_FEMA_P58_2nd.csv', 'Hazus Earthquake - Buildings': 'damage_DB_Hazus_EQ_bldg.csv', @@ -80,23 +73,21 @@ default_damage_processes = { 'FEMA P-58': { - "1_excessive.coll.DEM": {"DS1": "collapse_DS1"}, - "2_collapse": {"DS1": "ALL_NA"}, - "3_excessiveRID": {"DS1": "irreparable_DS1"}, + '1_excessive.coll.DEM': {'DS1': 'collapse_DS1'}, + '2_collapse': {'DS1': 'ALL_NA'}, + '3_excessiveRID': {'DS1': 'irreparable_DS1'}, }, - # TODO: expand with ground failure logic + # TODO(AZ): expand with ground failure logic 'Hazus Earthquake': { - "1_STR": {"DS5": "collapse_DS1"}, - "2_LF": {"DS5": "collapse_DS1"}, - "3_excessive.coll.DEM": {"DS1": "collapse_DS1"}, - "4_collapse": {"DS1": "ALL_NA"}, - "5_excessiveRID": {"DS1": "irreparable_DS1"}, + '1_STR': {'DS5': 'collapse_DS1'}, + '2_LF': {'DS5': 'collapse_DS1'}, + '3_excessive.coll.DEM': {'DS1': 'collapse_DS1'}, + '4_collapse': {'DS1': 'ALL_NA'}, + '5_excessiveRID': {'DS1': 'irreparable_DS1'}, }, 'Hazus Hurricane': {}, } -# pylint: enable=consider-using-namedtuple-or-dataclass - class AssessmentBase: """ @@ -107,24 +98,25 @@ class AssessmentBase: """ __slots__: list[str] = [ - 'stories', - 'options', - 'unit_conversion_factors', - 'log', - 'demand', 'asset', 'damage', + 'demand', + 'log', 'loss', + 'options', + 'stories', + 
'unit_conversion_factors', ] - def __init__(self, config_options: dict[str, Any] | None = None): + def __init__(self, config_options: dict[str, Any] | None = None) -> None: """ - Initializes an Assessment object. + Initialize an Assessment object. Parameters ---------- - config_options (Optional[dict]): + config_options: User-specified configuration dictionary. + """ self.stories: int | None = None self.options = base.Options(config_options, self) @@ -146,9 +138,9 @@ def __init__(self, config_options: dict[str, Any] | None = None): self.loss: model.LossModel = model.LossModel(self) @property - def bldg_repair(self): + def bldg_repair(self) -> model.LossModel: """ - + Exists for . Returns ------- @@ -156,7 +148,7 @@ def bldg_repair(self): The loss model. """ - self.log.warn( + self.log.warning( '`.bldg_repair` is deprecated and will be dropped in ' 'future versions of pelicun. ' 'Please use `.loss` instead.' @@ -165,9 +157,9 @@ def bldg_repair(self): return self.loss @property - def repair(self): + def repair(self) -> model.LossModel: """ - + Exists for . Returns ------- @@ -175,7 +167,7 @@ def repair(self): The damage state-driven component loss model. """ - self.log.warn( + self.log.warning( '`.repair` is deprecated and will be dropped in ' 'future versions of pelicun. ' 'Please use `.loss` instead.' @@ -184,6 +176,8 @@ def repair(self): def get_default_data(self, data_name: str) -> pd.DataFrame: """ + Load a default data file. + Loads a default data file by name and returns it. This method is specifically designed to access predefined CSV files from a structured directory path related to the SimCenter fragility @@ -191,7 +185,7 @@ def get_default_data(self, data_name: str) -> pd.DataFrame: Parameters ---------- - data_name : str + data_name: str The name of the CSV file to be loaded, without the '.csv' extension. This name is used to construct the full path to the file. 
@@ -201,19 +195,19 @@ def get_default_data(self, data_name: str) -> pd.DataFrame: pd.DataFrame The DataFrame containing the data loaded from the specified CSV file. - """ + """ # if 'fragility_DB' in data_name: data_name = data_name.replace('fragility_DB', 'damage_DB') - self.log.warn( + self.log.warning( '`fragility_DB` is deprecated and will be dropped in ' 'future versions of pelicun. ' 'Please use `damage_DB` instead.' ) if 'bldg_repair_DB' in data_name: data_name = data_name.replace('bldg_repair_DB', 'loss_repair_DB') - self.log.warn( + self.log.warning( '`bldg_repair_DB` is deprecated and will be dropped in ' 'future versions of pelicun. ' 'Please use `loss_repair_DB` instead.' @@ -243,25 +237,26 @@ def get_default_metadata(self, data_name: str) -> dict: Default metadata """ - # if 'fragility_DB' in data_name: data_name = data_name.replace('fragility_DB', 'damage_DB') - self.log.warn( + self.log.warning( '`fragility_DB` is deprecated and will be dropped in ' 'future versions of pelicun. Please use `damage_DB` instead.' ) data_path = f'{base.pelicun_path}/resources/SimCenterDBDL/{data_name}.json' - with open(data_path, 'r', encoding='utf-8') as f: + with Path(data_path).open(encoding='utf-8') as f: data = json.load(f) - return data + return data # noqa: RET504 def calc_unit_scale_factor(self, unit: str) -> float: """ + Determine unit scale factor. + Determines the scale factor from input unit to the - corresponding base unit + corresponding base unit. 
Parameters ---------- @@ -279,8 +274,8 @@ def calc_unit_scale_factor(self, unit: str) -> float: ------ KeyError When an invalid unit is specified - """ + """ unit_lst = unit.strip().split(' ') # check if there is a quantity specified; if yes, parse it @@ -296,14 +291,15 @@ def calc_unit_scale_factor(self, unit: str) -> float: scale_factor = unit_count * self.unit_conversion_factors[unit_name] except KeyError as exc: - raise KeyError( - f"Specified unit not recognized: {unit_count} {unit_name}" - ) from exc + msg = f'Specified unit not recognized: {unit_count} {unit_name}' + raise KeyError(msg) from exc return scale_factor def scale_factor(self, unit: str | None) -> float: """ + Get scale factor of given unit. + Returns the scale factor of a given unit. If the unit is unknown it raises an error. If the unit is None it returns 1.00. @@ -324,13 +320,13 @@ def scale_factor(self, unit: str | None) -> float: If the unit is unknown. """ - if unit is not None: if unit in self.unit_conversion_factors: scale_factor = self.unit_conversion_factors[unit] else: - raise ValueError(f"Unknown unit: {unit}") + msg = f'Unknown unit: {unit}' + raise ValueError(msg) else: scale_factor = 1.0 @@ -361,9 +357,9 @@ def calculate_damage( block_batch_size: int = 1000, ) -> None: """ - Calculates damage. + Calculate damage. - Paraemters + Parameters ---------- num_stories: int Number of stories of the asset. Applicable to buildings. @@ -385,7 +381,7 @@ def calculate_damage( optional keys: 'marginals', 'empirical', and 'correlation'. The value under each key shall be a DataFrame. - cmp_data_source : str or dict + cmp_data_source: str or dict The source from where to load the component model data. If it's a string, it should be the prefix for three files: one for marginal distributions (`_marginals.csv`), @@ -414,11 +410,11 @@ def calculate_damage( subtraction, '*' for multiplication, and '/' for division. 
residual_drift_configuration: dict Dictionary containing the following keys-values: - - params : dict + - params: dict A dictionary containing parameters required for the estimation method, such as 'yield_drift', which is the drift at which yielding is expected to occur. - - method : str, optional + - method: str, optional The method used to estimate the RID values. Currently, only 'FEMA P58' is implemented. Defaults to 'FEMA P58'. collapse_fragility_configuration: dict @@ -438,7 +434,7 @@ def calculate_damage( Maximum number of components in each batch. """ - # TODO: when we build the API docs, ensure the above is + # TODO(JVM): when we build the API docs, ensure the above is # properly rendered. self.demand.load_model(demand_data_source) @@ -472,9 +468,9 @@ def calculate_loss( loss_model_data_paths: list[str | pd.DataFrame], loss_map_path: str | pd.DataFrame | None = None, loss_map_policy: str | None = None, - ): + ) -> None: """ - Calculates loss. + Calculate loss. Parameters ---------- @@ -514,9 +510,9 @@ def aggregate_loss( tuple[uq.RandomVariableRegistry, dict[str, float]] | None ) = None, loss_combination: dict | None = None, - ): + ) -> tuple[pd.DataFrame, pd.DataFrame]: """ - Aggregates losses. + Aggregate losses. Parameters ---------- @@ -528,7 +524,7 @@ def aggregate_loss( thresholds. If the aggregated value for a decision variable (conditioned on no replacement) exceeds the threshold, then replacement is triggered. This can happen - for multuple decision variables at the same + for multiple decision variables at the same realization. The consequence keyword `replacement` is reserved to represent exclusive triggering of the replacement consequences, and other consequences are @@ -536,9 +532,9 @@ def aggregate_loss( triggered. 
When assigned to None, then `replacement` is still treated as an exclusive consequence (other consequences are set to zero when replacement is nonzero) - but it is not being additinally triggered by the + but it is not being additionally triggered by the exceedance of any thresholds. The aggregated loss sample - conains an additional column with information on whether + contains an additional column with information on whether replacement was already present or triggered by a threshold exceedance for each realization. loss_combination: dict, optional @@ -563,8 +559,8 @@ def aggregate_loss( components. In this case the (`c1`, `c2`) tuple should contain M elements instead of two. - Note - ---- + Notes + ----- Regardless of the value of the arguments, this method does not alter the state of the loss model, i.e., it does not modify the values of the `.sample` attributes. @@ -578,59 +574,53 @@ def aggregate_loss( replacement. If no thresholds are specified it only contains False values. - Raises - ------ - ValueError - When inputs are invalid. - """ - - return self.loss.aggregate_losses( + output = self.loss.aggregate_losses( replacement_configuration, loss_combination, future=True ) + assert isinstance(output, tuple) + return output class DLCalculationAssessment(AssessmentBase): - """ - Base class for the assessment objects used in `DL_calculation.py` - - """ + """Base class for the assessment objects used in `DL_calculation.py`.""" __slots__: list[str] = [] - def calculate_demand( + def calculate_demand( # noqa: C901 self, - demand_path: str, + demand_path: Path, collapse_limits: dict[str, float] | None, length_unit: str | None, demand_calibration: dict | None, sample_size: int, - coupled_demands: bool, demand_cloning: dict | None, residual_drift_inference: dict | None, + *, + coupled_demands: bool, ) -> None: """ - Calculates demands. + Calculate demands. Parameters ---------- - demand_path : str + demand_path: str Path to the demand data file. 
- collapse_limits : dict[str, float] or None + collapse_limits: dict[str, float] or None Optional dictionary with demand types and their respective collapse limits. length_unit : str, optional Unit of length to be used to add units to the demand data if needed. - demand_calibration : dict or None + demand_calibration: dict or None Calibration data for the demand model. - sample_size : int + sample_size: int Number of realizations. - coupled_demands : bool + coupled_demands: bool Whether to preserve the raw order of the demands. - demand_cloning : dict or None + demand_cloning: dict or None Demand cloning configuration. - residual_drift_inference : dict or None + residual_drift_inference: dict or None Information for residual drift inference. Raises @@ -639,7 +629,6 @@ def calculate_demand( When an unknown residual drift method is specified. """ - idx = pd.IndexSlice raw_demands = pd.read_csv(demand_path, index_col=0) @@ -651,29 +640,30 @@ def calculate_demand( if 'Units' in raw_demands.index: raw_units = raw_demands.loc['Units', :] - raw_demands.drop('Units', axis=0, inplace=True) + raw_demands = raw_demands.drop('Units', axis=0) else: raw_units = None - DEM_to_drop = np.full(raw_demands.shape[0], False) + dem_to_drop = np.full(raw_demands.shape[0], fill_value=False) - for DEM_type, limit in collapse_limits.items(): - assert isinstance(DEM_type, str) + for dem_type, limit in collapse_limits.items(): + assert isinstance(dem_type, str) assert isinstance(limit, (str, float)) - if raw_demands.columns.nlevels == 4: - DEM_to_drop += raw_demands.loc[ + nlevels_with_event_id = 4 + if raw_demands.columns.nlevels == nlevels_with_event_id: + dem_to_drop += raw_demands.loc[ :, # type: ignore - idx[:, DEM_type, :, :], + idx[:, dem_type, :, :], ].max(axis=1) > float(limit) else: - DEM_to_drop += raw_demands.loc[ + dem_to_drop += raw_demands.loc[ :, # type: ignore - idx[DEM_type, :, :], + idx[dem_type, :, :], ].max(axis=1) > float(limit) - raw_demands = 
raw_demands.loc[~DEM_to_drop, :] + raw_demands = raw_demands.loc[~dem_to_drop, :] if isinstance(raw_units, pd.Series): raw_demands = pd.concat( @@ -681,15 +671,16 @@ def calculate_demand( ) self.log.msg( - f"{np.sum(DEM_to_drop)} realizations removed from the demand " - f"input because they exceed the collapse limit. The remaining " - f"sample size: {raw_demands.shape[0]}" + f'{np.sum(dem_to_drop)} realizations removed from the demand ' + f'input because they exceed the collapse limit. The remaining ' + f'sample size: {raw_demands.shape[0]}' ) # add units to the demand data if needed - if "Units" not in raw_demands.index: + if 'Units' not in raw_demands.index: if length_unit is None: - raise ValueError('A length unit is required to infer demand units.') + msg = 'A length unit is required to infer demand units.' + raise ValueError(msg) demands = _add_units(raw_demands, length_unit) else: @@ -706,12 +697,12 @@ def calculate_demand( else: # if no calibration is requested, # set all demands to use empirical distribution - self.demand.calibrate_model({"ALL": {"DistributionFamily": "empirical"}}) + self.demand.calibrate_model({'ALL': {'DistributionFamily': 'empirical'}}) # and generate a new demand sample self.demand.generate_sample( { - "SampleSize": sample_size, + 'SampleSize': sample_size, 'PreserveRawOrder': coupled_demands, 'DemandCloning': demand_cloning, } @@ -728,46 +719,44 @@ def calculate_demand( # get residual drift estimates, if needed if residual_drift_inference: - # `method` is guaranteed to exist because it is confirmed when # parsing the configuration file. 
rid_inference_method = residual_drift_inference.pop('method') if rid_inference_method == 'FEMA P-58': - RID_list: list[pd.DataFrame] = [] - PID = demand_sample['PID'].copy() - PID.drop('Units', inplace=True) - PID = PID.astype(float) + rid_list: list[pd.DataFrame] = [] + pid = demand_sample['PID'].copy() + pid = pid.drop('Units') + pid = pid.astype(float) for direction, delta_yield in residual_drift_inference.items(): - - pids = PID.loc[:, idx[:, direction]] # type: ignore + pids = pid.loc[:, idx[:, direction]] # type: ignore assert isinstance(pids, pd.DataFrame) - RID = self.demand.estimate_RID( + rid = self.demand.estimate_RID( pids, {'yield_drift': float(delta_yield)}, ) - RID_list.append(RID) + rid_list.append(rid) - RID = pd.concat(RID_list, axis=1) - RID_units = pd.Series( - ['unitless'] * RID.shape[1], - index=RID.columns, + rid = pd.concat(rid_list, axis=1) + rid_units = pd.Series( + ['unitless'] * rid.shape[1], + index=rid.columns, name='Units', ) - RID_sample = pd.concat([RID, RID_units.to_frame().T]) - demand_sample = pd.concat([demand_sample, RID_sample], axis=1) + rid_sample = pd.concat([rid, rid_units.to_frame().T]) + demand_sample = pd.concat([demand_sample, rid_sample], axis=1) else: - - raise ValueError( + msg = ( f'Unknown residual drift inference method: ' f'`{rid_inference_method}`.' ) + raise ValueError(msg) # add a constant one demand - demand_sample[('ONE', '0', '1')] = np.ones(demand_sample.shape[0]) + demand_sample['ONE', '0', '1'] = np.ones(demand_sample.shape[0]) demand_sample.loc['Units', ('ONE', '0', '1')] = 'unitless' self.demand.load_sample(base.convert_to_SimpleIndex(demand_sample, axis=1)) @@ -777,23 +766,24 @@ def calculate_asset( num_stories: int, component_assignment_file: str | None, collapse_fragility_demand_type: str | None, - add_irreparable_damage_columns: bool, component_sample_file: str | None, - ): + *, + add_irreparable_damage_columns: bool, + ) -> None: """ - Generates the asset model sample. 
+ Generate the asset model sample. Parameters ---------- - num_stories : int + num_stories: int Number of stories. - component_assignment_file : str or None + component_assignment_file: str or None Path to a component assignment file. - collapse_fragility_demand_type : str or None + collapse_fragility_demand_type: str or None Optional demand type for the collapse fragility. - add_irreparable_damage_columns : bool + add_irreparable_damage_columns: bool Whether to add columns for irreparable damage. - component_sample_file : str or None + component_sample_file: str or None Optional path to an existing component sample file. Raises @@ -802,7 +792,6 @@ def calculate_asset( With invalid combinations of arguments. """ - # retrieve the demand sample demand_sample = self.demand.save_sample() assert isinstance(demand_sample, pd.DataFrame) @@ -817,11 +806,12 @@ def calculate_asset( component_assignment_file is not None and component_sample_file is not None ): - raise ValueError( + msg = ( 'Both `component_assignment_file` and ' '`component_sample_file` are provided. ' 'Please provide only one.' 
) + raise ValueError(msg) # load a component model and generate a sample if component_assignment_file is not None: @@ -831,7 +821,7 @@ def calculate_asset( encoding_errors='replace', ) - DEM_types = demand_sample.columns.unique(level=0) + dem_types = demand_sample.columns.unique(level=0) # add component(s) to support collapse calculation if collapse_fragility_demand_type is not None: @@ -840,7 +830,7 @@ def calculate_asset( # (otherwise we have a global demand and evaluate # collapse directly, so this code should be skipped) - if collapse_fragility_demand_type in DEM_types: + if collapse_fragility_demand_type in dem_types: # excessive coll_DEM is added on every floor # to detect large RIDs cmp_marginals.loc['excessive.coll.DEM', 'Units'] = 'ea' @@ -862,7 +852,6 @@ def calculate_asset( cmp_marginals.loc['excessive.coll.DEM', 'Theta_0'] = 1.0 else: - self.log.msg( f'WARNING: No {collapse_fragility_demand_type} ' f'among available demands. Collapse cannot ' @@ -877,7 +866,7 @@ def calculate_asset( # add components to support irreparable damage calculation if add_irreparable_damage_columns: - if 'RID' in DEM_types: + if 'RID' in dem_types: # excessive RID is added on every floor to detect large RIDs cmp_marginals.loc['excessiveRID', 'Units'] = 'ea' @@ -913,41 +902,42 @@ def calculate_asset( if component_sample_file is not None: self.asset.load_cmp_sample(component_sample_file) - def calculate_damage( + def calculate_damage( # noqa: C901 self, - length_unit: float | None, + length_unit: str | None, component_database: str, component_database_path: str | None = None, collapse_fragility: dict | None = None, - is_for_water_network_assessment: bool = False, irreparable_damage: dict | None = None, damage_process_approach: str | None = None, damage_process_file_path: str | None = None, custom_model_dir: str | None = None, + *, + is_for_water_network_assessment: bool = False, ) -> None: """ - Calculates damage. + Calculate damage. 
Parameters ---------- length_unit : str, optional Unit of length to be used to add units to the demand data if needed. - component_database : str + component_database: str Name of the component database. - component_database_path : str or None + component_database_path: str or None Optional path to a component database file. - collapse_fragility : dict or None + collapse_fragility: dict or None Collapse fragility information. - is_for_water_network_assessment : bool + is_for_water_network_assessment: bool Whether the assessment is for a water network. - irreparable_damage : dict or None + irreparable_damage: dict or None Information for irreparable damage. - damage_process_approach : str or None + damage_process_approach: str or None Approach for the damage process. - damage_process_file_path : str or None + damage_process_file_path: str or None Optional path to a damage process file. - custom_model_dir : str or None + custom_model_dir: str or None Optional directory for custom models. Raises @@ -956,22 +946,21 @@ def calculate_damage( With invalid combinations of arguments. """ - # load the fragility information - if component_database in default_DBs['fragility']: + if component_database in default_dbs['fragility']: component_db = [ - 'PelicunDefault/' + default_DBs['fragility'][component_database], + 'PelicunDefault/' + default_dbs['fragility'][component_database], ] else: component_db = [] if component_database_path is not None: - if custom_model_dir is None: - raise ValueError( + msg = ( '`custom_model_dir` needs to be specified ' 'when `component_database_path` is not None.' 
) + raise ValueError(msg) if 'CustomDLDataFolder' in component_database_path: component_database_path = component_database_path.replace( @@ -985,12 +974,11 @@ def calculate_damage( # prepare additional fragility data # get the database header from the default P58 db - P58_data = self.get_default_data('damage_DB_FEMA_P58_2nd') + p58_data = self.get_default_data('damage_DB_FEMA_P58_2nd') - adf = pd.DataFrame(columns=P58_data.columns) + adf = pd.DataFrame(columns=p58_data.columns) if collapse_fragility: - assert self.asset.cmp_marginal_params is not None if ( @@ -998,70 +986,72 @@ def calculate_damage( in self.asset.cmp_marginal_params.index.get_level_values('cmp') ): # if there is story-specific evaluation - coll_CMP_name = 'excessive.coll.DEM' + coll_cmp_name = 'excessive.coll.DEM' else: # otherwise, for global collapse evaluation - coll_CMP_name = 'collapse' + coll_cmp_name = 'collapse' - adf.loc[coll_CMP_name, ('Demand', 'Directional')] = 1 - adf.loc[coll_CMP_name, ('Demand', 'Offset')] = 0 + adf.loc[coll_cmp_name, ('Demand', 'Directional')] = 1 + adf.loc[coll_cmp_name, ('Demand', 'Offset')] = 0 - coll_DEM = collapse_fragility['DemandType'] + coll_dem = collapse_fragility['DemandType'] - if '_' in coll_DEM: - coll_DEM, coll_DEM_spec = coll_DEM.split('_') + if '_' in coll_dem: + coll_dem, coll_dem_spec = coll_dem.split('_') else: - coll_DEM_spec = None + coll_dem_spec = None - coll_DEM_name = None + coll_dem_name = None for demand_name, demand_short in EDP_to_demand_type.items(): - if demand_short == coll_DEM: - coll_DEM_name = demand_name + if demand_short == coll_dem: + coll_dem_name = demand_name break - if coll_DEM_name is None: - raise ValueError( - "A valid demand type acronym was not provided in" - "the configuration file. Please ensure the" + if coll_dem_name is None: + msg = ( + 'A valid demand type acronym was not provided in' + 'the configuration file. 
Please ensure the' "'DemandType' field in the collapse fragility" - "section contains one of the recognized acronyms" + 'section contains one of the recognized acronyms' "(e.g., 'SA', 'PFA', 'PGA'). Refer to the" "configuration file's 'collapse_fragility'" - "section." + 'section.' ) + raise ValueError(msg) - if coll_DEM_spec is None: - adf.loc[coll_CMP_name, ('Demand', 'Type')] = coll_DEM_name + if coll_dem_spec is None: + adf.loc[coll_cmp_name, ('Demand', 'Type')] = coll_dem_name else: - adf.loc[coll_CMP_name, ('Demand', 'Type')] = ( - f'{coll_DEM_name}|{coll_DEM_spec}' + adf.loc[coll_cmp_name, ('Demand', 'Type')] = ( + f'{coll_dem_name}|{coll_dem_spec}' ) if length_unit is None: - raise ValueError('A length unit is required.') - coll_DEM_unit = _add_units( + msg = 'A length unit is required.' + raise ValueError(msg) + coll_dem_unit = _add_units( pd.DataFrame( columns=[ - f'{coll_DEM}-1-1', + f'{coll_dem}-1-1', ] ), length_unit, ).iloc[0, 0] - adf.loc[coll_CMP_name, ('Demand', 'Unit')] = coll_DEM_unit - adf.loc[coll_CMP_name, ('LS1', 'Family')] = collapse_fragility[ + adf.loc[coll_cmp_name, ('Demand', 'Unit')] = coll_dem_unit + adf.loc[coll_cmp_name, ('LS1', 'Family')] = collapse_fragility[ 'CapacityDistribution' ] - adf.loc[coll_CMP_name, ('LS1', 'Theta_0')] = collapse_fragility[ + adf.loc[coll_cmp_name, ('LS1', 'Theta_0')] = collapse_fragility[ 'CapacityMedian' ] - adf.loc[coll_CMP_name, ('LS1', 'Theta_1')] = collapse_fragility[ + adf.loc[coll_cmp_name, ('LS1', 'Theta_1')] = collapse_fragility[ 'Theta_1' ] - adf.loc[coll_CMP_name, 'Incomplete'] = 0 + adf.loc[coll_cmp_name, 'Incomplete'] = 0 - if coll_CMP_name != 'collapse': + if coll_cmp_name != 'collapse': # for story-specific evaluation, we need to add a placeholder # fragility that will never trigger, but helps us aggregate # results in the end @@ -1084,7 +1074,6 @@ def calculate_damage( adf.loc['collapse', 'Incomplete'] = 0 if irreparable_damage: - # add excessive RID fragility according to settings 
provided in the # input file adf.loc['excessiveRID', ('Demand', 'Directional')] = 1 @@ -1097,7 +1086,7 @@ def calculate_damage( adf.loc['excessiveRID', ('LS1', 'Theta_0')] = irreparable_damage[ 'DriftCapacityMedian' ] - adf.loc['excessiveRID', ('LS1', 'Family')] = "lognormal" + adf.loc['excessiveRID', ('LS1', 'Family')] = 'lognormal' adf.loc['excessiveRID', ('LS1', 'Theta_1')] = irreparable_damage[ 'DriftCapacityLogStd' ] @@ -1113,7 +1102,7 @@ def calculate_damage( adf.loc['irreparable', ('LS1', 'Theta_0')] = 1e10 adf.loc['irreparable', 'Incomplete'] = 0 - # TODO: we can improve this by creating a water + # TODO(AZ): we can improve this by creating a water # network-specific assessment class if is_for_water_network_assessment: # add a placeholder aggregate fragility that will never trigger @@ -1128,20 +1117,18 @@ def calculate_damage( adf.loc['aggregate', 'Incomplete'] = 0 self.damage.load_model_parameters( - component_db + [adf], + [*component_db, adf], set(self.asset.list_unique_component_ids()), ) # load the damage process if needed dmg_process = None - if damage_process_approach is not None: - + if damage_process_approach is not None: # noqa: PLR1702 if damage_process_approach in default_damage_processes: dmg_process = default_damage_processes[damage_process_approach] # For Hazus Earthquake, we need to specify the component ids if damage_process_approach == 'Hazus Earthquake': - cmp_sample = self.asset.save_cmp_sample() assert isinstance(cmp_sample, pd.DataFrame) @@ -1159,7 +1146,7 @@ def calculate_damage( # first, look at the source component id new_source = None for cmp_type, cmp_id in cmp_map.items(): - if (cmp_type in source_cmp) and (cmp_id != ''): + if (cmp_type in source_cmp) and (cmp_id != ''): # noqa: PLC1901 new_source = source_cmp.replace(cmp_type, cmp_id) break @@ -1173,8 +1160,8 @@ def calculate_damage( for ds_i, target_vals in action.items(): if isinstance(target_vals, str): for cmp_type, cmp_id in cmp_map.items(): - if (cmp_type in target_vals) 
and (cmp_id != ''): - target_vals = target_vals.replace( + if (cmp_type in target_vals) and (cmp_id != ''): # noqa: PLC1901 + target_vals = target_vals.replace( # noqa: PLW2901 cmp_type, cmp_id ) @@ -1187,9 +1174,9 @@ def calculate_damage( for target_val in target_vals: for cmp_type, cmp_id in cmp_map.items(): if (cmp_type in target_val) and ( - cmp_id != '' + cmp_id != '' # noqa: PLC1901 ): - target_val = target_val.replace( + target_val = target_val.replace( # noqa: PLW2901 cmp_type, cmp_id ) @@ -1199,27 +1186,27 @@ def calculate_damage( dmg_process = new_dmg_process - elif damage_process_approach == "User Defined": - + elif damage_process_approach == 'User Defined': if damage_process_file_path is None: - raise ValueError( + msg = ( 'When `damage_process_approach` is set to ' '`User Defined`, a `damage_process_file_path` ' 'needs to be provided.' ) + raise ValueError(msg) # load the damage process from a file - with open(damage_process_file_path, 'r', encoding='utf-8') as f: + with Path(damage_process_file_path).open(encoding='utf-8') as f: dmg_process = json.load(f) - elif damage_process_approach == "None": + elif damage_process_approach == 'None': # no damage process applied for the calculation dmg_process = None else: self.log.msg( - f"Prescribed Damage Process not recognized: " - f"`{damage_process_approach}`." + f'Prescribed Damage Process not recognized: ' + f'`{damage_process_approach}`.' ) # calculate damages @@ -1238,37 +1225,37 @@ def calculate_loss( replacement_carbon_parameters: dict[str, float | str] | None = None, replacement_energy_parameters: dict[str, float | str] | None = None, loss_map_path: str | None = None, - decision_variables: list[str] | None = None, + decision_variables: tuple[str, ...] | None = None, ) -> tuple[pd.DataFrame, pd.DataFrame]: """ - Calculates losses. + Calculate losses. Parameters ---------- - loss_map_approach : str + loss_map_approach: str Approach for the loss map generation. Can be either `User Defined` or `Automatic`. 
- occupancy_type : str + occupancy_type: str Occupancy type. - consequence_database : str + consequence_database: str Name of the consequence database. - consequence_database_path : str or None + consequence_database_path: str or None Optional path to a consequence database file. - custom_model_dir : str or None + custom_model_dir: str or None Optional directory for custom models. - damage_process_approach : str + damage_process_approach: str Damage process approach. Defaults to `User Defined`. - replacement_cost_parameters : dict or None + replacement_cost_parameters: dict or None Parameters for replacement cost. - replacement_time_parameters : dict or None + replacement_time_parameters: dict or None Parameters for replacement time. - replacement_carbon_parameters : dict or None + replacement_carbon_parameters: dict or None Parameters for replacement carbon. - replacement_energy_parameters : dict or None + replacement_energy_parameters: dict or None Parameters for replacement energy. - loss_map_path : str or None + loss_map_path: str or None Optional path to a loss map file. - decision_variables : list[str] or None + decision_variables: tuple[str] or None Optional decision variables for the assessment. Returns @@ -1286,9 +1273,7 @@ def calculate_loss( When an invalid loss map approach is specified. 
""" - - conseq_df, consequence_db = load_consequence_info( - self, + conseq_df, consequence_db = self.load_consequence_info( consequence_database, consequence_database_path, custom_model_dir, @@ -1350,17 +1335,19 @@ def calculate_loss( # prepare the loss map loss_map = None - if loss_map_approach == "Automatic": + if loss_map_approach == 'Automatic': # get the damage sample loss_map = _loss__map_auto( self, conseq_df, damage_process_approach, occupancy_type ) - elif loss_map_approach == "User Defined": + elif loss_map_approach == 'User Defined': + assert custom_model_dir is not None loss_map = _loss__map_user(custom_model_dir, loss_map_path) else: - raise ValueError(f'Invalid MapApproach value: `{loss_map_approach}`.') + msg = f'Invalid MapApproach value: `{loss_map_approach}`.' + raise ValueError(msg) # prepare additional loss map entries, if needed if 'DMG-collapse' not in loss_map.index: @@ -1371,7 +1358,7 @@ def calculate_loss( self.loss.decision_variables = decision_variables self.loss.add_loss_map(loss_map, loss_map_policy=None) - self.loss.load_model_parameters(consequence_db + [adf]) + self.loss.load_model_parameters([*consequence_db, adf]) self.loss.calculate() @@ -1380,93 +1367,92 @@ def calculate_loss( assert isinstance(exceedance_bool_df, pd.DataFrame) return df_agg, exceedance_bool_df + def load_consequence_info( + self, + consequence_database: str, + consequence_database_path: str | None = None, + custom_model_dir: str | None = None, + ) -> tuple[pd.DataFrame, list[str]]: + """ + Load consequence information for the assessment. -def load_consequence_info( - self, - consequence_database: str, - consequence_database_path: str | None = None, - custom_model_dir: str | None = None, -) -> tuple[pd.DataFrame, list[str]]: - """ - Load consequence information for the assessment. + Parameters + ---------- + consequence_database: str + Name of the consequence database. + consequence_database_path: str or None + Optional path to a consequence database file. 
+ custom_model_dir: str or None + Optional directory for custom models. - Parameters - ---------- - consequence_database : str - Name of the consequence database. - consequence_database_path : str or None - Optional path to a consequence database file. - custom_model_dir : str or None - Optional directory for custom models. + Returns + ------- + tuple[pd.DataFrame, list[str]] + A tuple containing: + - A DataFrame with the consequence data. + - A list of paths to the consequence databases used. - Returns - ------- - tuple[pd.DataFrame, list[str]] - A tuple containing: - - A DataFrame with the consequence data. - - A list of paths to the consequence databases used. + Raises + ------ + ValueError + With invalid combinations of arguments. - Raises - ------ - ValueError - With invalid combinations of arguments. + """ + if consequence_database in default_dbs['repair']: + consequence_db = [ + 'PelicunDefault/' + default_dbs['repair'][consequence_database], + ] - """ - if consequence_database in default_DBs['repair']: - consequence_db = [ - 'PelicunDefault/' + default_DBs['repair'][consequence_database], - ] + conseq_df = self.get_default_data( + default_dbs['repair'][consequence_database][:-4] + ) + else: + consequence_db = [] - conseq_df = self.get_default_data( - default_DBs['repair'][consequence_database][:-4] - ) - else: - consequence_db = [] + conseq_df = pd.DataFrame() - conseq_df = pd.DataFrame() + if consequence_database_path is not None: + if custom_model_dir is None: + msg = ( + 'When `consequence_database_path` is specified, ' + '`custom_model_dir` needs to be specified as well.' 
+ ) + raise ValueError(msg) - if consequence_database_path is not None: + if 'CustomDLDataFolder' in consequence_database_path: + consequence_database_path = consequence_database_path.replace( + 'CustomDLDataFolder', custom_model_dir + ) - if custom_model_dir is None: - raise ValueError( - 'When `consequence_database_path` is specified, ' - '`custom_model_dir` needs to be specified as well.' - ) + consequence_db += [consequence_database_path] - if 'CustomDLDataFolder' in consequence_database_path: - consequence_database_path = consequence_database_path.replace( - 'CustomDLDataFolder', custom_model_dir + extra_conseq_df = file_io.load_data( + consequence_database_path, + unit_conversion_factors=None, + orientation=1, + reindex=False, ) + assert isinstance(extra_conseq_df, pd.DataFrame) - consequence_db += [consequence_database_path] - - extra_conseq_df = file_io.load_data( - consequence_database_path, - unit_conversion_factors=None, - orientation=1, - reindex=False, - ) - assert isinstance(extra_conseq_df, pd.DataFrame) - - if isinstance(conseq_df, pd.DataFrame): - conseq_df = pd.concat([conseq_df, extra_conseq_df]) - else: - conseq_df = extra_conseq_df + if isinstance(conseq_df, pd.DataFrame): + conseq_df = pd.concat([conseq_df, extra_conseq_df]) + else: + conseq_df = extra_conseq_df - consequence_db = consequence_db[::-1] + consequence_db = consequence_db[::-1] - return conseq_df, consequence_db + return conseq_df, consequence_db -def _add_units(raw_demands, length_unit): +def _add_units(raw_demands: pd.DataFrame, length_unit: str) -> pd.DataFrame: """ Add units to demand columns in a DataFrame. Parameters ---------- - raw_demands : pd.DataFrame + raw_demands: pd.DataFrame The raw demand data to which units will be added. - length_unit : str + length_unit: str The unit of length to be used (e.g., 'in' for inches). 
Returns @@ -1477,73 +1463,72 @@ def _add_units(raw_demands, length_unit): """ demands = raw_demands.T - demands.insert(0, "Units", np.nan) + demands.insert(0, 'Units', np.nan) if length_unit == 'in': length_unit = 'inch' - demands = base.convert_to_MultiIndex(demands, axis=0).sort_index(axis=0).T + demands = pd.DataFrame( + base.convert_to_MultiIndex(demands, axis=0).sort_index(axis=0).T + ) - if demands.columns.nlevels == 4: - DEM_level = 1 - else: - DEM_level = 0 + nlevels_with_event_id = 4 + dem_level = 1 if demands.columns.nlevels == nlevels_with_event_id else 0 # drop demands with no EDP type identified - demands.drop( - demands.columns[demands.columns.get_level_values(DEM_level) == ''], + demands = demands.drop( + demands.columns[demands.columns.get_level_values(dem_level) == ''], axis=1, - inplace=True, ) # assign units - demand_cols = demands.columns.get_level_values(DEM_level) + demand_cols = demands.columns.get_level_values(dem_level).to_list() # remove additional info from demand names demand_cols = [d.split('_')[0] for d in demand_cols] # acceleration - acc_EDPs = ['PFA', 'PGA', 'SA'] - EDP_mask = np.isin(demand_cols, acc_EDPs) + acc_edps = ['PFA', 'PGA', 'SA'] + edp_mask = np.isin(demand_cols, acc_edps) - if np.any(EDP_mask): - demands.iloc[0, EDP_mask] = length_unit + 'ps2' + if np.any(edp_mask): + demands.iloc[0, edp_mask] = length_unit + 'ps2' # type: ignore # speed - speed_EDPs = ['PFV', 'PWS', 'PGV', 'SV'] - EDP_mask = np.isin(demand_cols, speed_EDPs) + speed_edps = ['PFV', 'PWS', 'PGV', 'SV'] + edp_mask = np.isin(demand_cols, speed_edps) - if np.any(EDP_mask): - demands.iloc[0, EDP_mask] = length_unit + 'ps' + if np.any(edp_mask): + demands.iloc[0, edp_mask] = length_unit + 'ps' # type: ignore # displacement - disp_EDPs = ['PFD', 'PIH', 'SD', 'PGD'] - EDP_mask = np.isin(demand_cols, disp_EDPs) + disp_edps = ['PFD', 'PIH', 'SD', 'PGD'] + edp_mask = np.isin(demand_cols, disp_edps) - if np.any(EDP_mask): - demands.iloc[0, EDP_mask] = length_unit + 
if np.any(edp_mask): + demands.iloc[0, edp_mask] = length_unit # type: ignore # drift ratio - rot_EDPs = ['PID', 'PRD', 'DWD', 'RDR', 'PMD', 'RID'] - EDP_mask = np.isin(demand_cols, rot_EDPs) + rot_edps = ['PID', 'PRD', 'DWD', 'RDR', 'PMD', 'RID'] + edp_mask = np.isin(demand_cols, rot_edps) - if np.any(EDP_mask): - demands.iloc[0, EDP_mask] = 'unitless' + if np.any(edp_mask): + demands.iloc[0, edp_mask] = 'unitless' # type: ignore # convert back to simple header and return the DF return base.convert_to_SimpleIndex(demands, axis=1) def _loss__add_replacement_energy( - adf, - DL_method, - unit=None, - median=None, - distribution=None, - theta_1=None, -): + adf: pd.DataFrame, + dl_method: str, + unit: str | None = None, + median: float | None = None, + distribution: str | None = None, + theta_1: float | None = None, +) -> None: """ - Adds replacement energy information. + Add replacement energy information. Parameters ---------- @@ -1573,44 +1558,37 @@ def _loss__add_replacement_energy( """ ren = ('replacement', 'Energy') if median is not None: + # TODO(JVM): in this case we need unit (add config parser check) - # TODO: in this case we need unit (add config parser check) - - adf.loc[ren, ('Quantity', 'Unit')] = "1 EA" + adf.loc[ren, ('Quantity', 'Unit')] = '1 EA' adf.loc[ren, ('DV', 'Unit')] = unit adf.loc[ren, ('DS1', 'Theta_0')] = median if distribution is not None: - - # TODO: in this case we need theta_1 (add config parser check) + # TODO(JVM): in this case we need theta_1 (add config parser check) adf.loc[ren, ('DS1', 'Family')] = distribution adf.loc[ren, ('DS1', 'Theta_1')] = theta_1 - else: - # add a default replacement energy value as a placeholder - # the default value depends on the consequence database - - # for FEMA P-58, use 0 kg - if DL_method == 'FEMA P-58': - adf.loc[ren, ('Quantity', 'Unit')] = '1 EA' - adf.loc[ren, ('DV', 'Unit')] = 'MJ' - adf.loc[ren, ('DS1', 'Theta_0')] = 0 + elif dl_method == 'FEMA P-58': + adf.loc[ren, ('Quantity', 'Unit')] = '1 
EA' + adf.loc[ren, ('DV', 'Unit')] = 'MJ' + adf.loc[ren, ('DS1', 'Theta_0')] = 0 - else: - # for everything else, remove this consequence - adf.drop(ren, inplace=True) + else: + # for everything else, remove this consequence + adf = adf.drop(ren) def _loss__add_replacement_carbon( - adf, - damage_process_approach, - unit=None, - median=None, - distribution=None, - theta_1=None, -): + adf: pd.DataFrame, + damage_process_approach: str, + unit: str | None = None, + median: float | None = None, + distribution: str | None = None, + theta_1: float | None = None, +) -> None: """ - Adds replacement carbon emission information. + Add replacement carbon emission information. Parameters ---------- @@ -1641,47 +1619,39 @@ def _loss__add_replacement_carbon( """ rcarb = ('replacement', 'Carbon') if median is not None: + # TODO(JVM): in this case we need unit (add config parser check) - # TODO: in this case we need unit (add config parser check) - - adf.loc[rcarb, ('Quantity', 'Unit')] = "1 EA" + adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA' adf.loc[rcarb, ('DV', 'Unit')] = unit adf.loc[rcarb, ('DS1', 'Theta_0')] = median if distribution is not None: - - # TODO: in this case we need theta_1 (add config parser check) + # TODO(JVM): in this case we need theta_1 (add config parser check) adf.loc[rcarb, ('DS1', 'Family')] = distribution adf.loc[rcarb, ('DS1', 'Theta_1')] = theta_1 - else: - - # add a default replacement carbon value as a placeholder - # the default value depends on the consequence database - - # for FEMA P-58, use 0 kg - if damage_process_approach == 'FEMA P-58': - adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rcarb, ('DV', 'Unit')] = 'kg' - adf.loc[rcarb, ('DS1', 'Theta_0')] = 0 + elif damage_process_approach == 'FEMA P-58': + adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rcarb, ('DV', 'Unit')] = 'kg' + adf.loc[rcarb, ('DS1', 'Theta_0')] = 0 - else: - # for everything else, remove this consequence - adf.drop(rcarb, inplace=True) + else: + # for 
everything else, remove this consequence + adf = adf.drop(rcarb) def _loss__add_replacement_time( - adf, - damage_process_approach, - conseq_df, - occupancy_type=None, - unit=None, - median=None, - distribution=None, - theta_1=None, -): + adf: pd.DataFrame, + damage_process_approach: str, + conseq_df: pd.DataFrame, + occupancy_type: str | None = None, + unit: str | None = None, + median: float | None = None, + distribution: str | None = None, + theta_1: float | None = None, +) -> None: """ - Adds replacement time information. + Add replacement time information. Parameters ---------- @@ -1724,57 +1694,49 @@ def _loss__add_replacement_time( """ rt = ('replacement', 'Time') if median is not None: + # TODO(JVM): in this case we need unit (add config parser check) - # TODO: in this case we need unit (add config parser check) - - adf.loc[rt, ('Quantity', 'Unit')] = "1 EA" + adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' adf.loc[rt, ('DV', 'Unit')] = unit adf.loc[rt, ('DS1', 'Theta_0')] = median if distribution is not None: - - # TODO: in this case we need theta_1 (add config parser check) + # TODO(JVM): in this case we need theta_1 (add config parser check) adf.loc[rt, ('DS1', 'Family')] = distribution adf.loc[rt, ('DS1', 'Theta_1')] = theta_1 - else: - - # add a default replacement time value as a placeholder - # the default value depends on the consequence database - - # for FEMA P-58, use 0 worker_days - if damage_process_approach == 'FEMA P-58': - adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rt, ('DV', 'Unit')] = 'worker_day' - adf.loc[rt, ('DS1', 'Theta_0')] = 0 - - # for Hazus EQ, use 1.0 as a loss_ratio - elif damage_process_approach == 'Hazus Earthquake - Buildings': - adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rt, ('DV', 'Unit')] = 'day' - - # load the replacement time that corresponds to total loss - adf.loc[rt, ('DS1', 'Theta_0')] = conseq_df.loc[ - (f"STR.{occupancy_type}", 'Time'), ('DS5', 'Theta_0') - ] + elif damage_process_approach == 'FEMA 
P-58': + adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rt, ('DV', 'Unit')] = 'worker_day' + adf.loc[rt, ('DS1', 'Theta_0')] = 0 + + # for Hazus EQ, use 1.0 as a loss_ratio + elif damage_process_approach == 'Hazus Earthquake - Buildings': + adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rt, ('DV', 'Unit')] = 'day' + + # load the replacement time that corresponds to total loss + adf.loc[rt, ('DS1', 'Theta_0')] = conseq_df.loc[ + (f'STR.{occupancy_type}', 'Time'), ('DS5', 'Theta_0') + ] - # otherwise, use 1 (and expect to have it defined by the user) - else: - adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rt, ('DV', 'Unit')] = 'loss_ratio' - adf.loc[rt, ('DS1', 'Theta_0')] = 1 + # otherwise, use 1 (and expect to have it defined by the user) + else: + adf.loc[rt, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rt, ('DV', 'Unit')] = 'loss_ratio' + adf.loc[rt, ('DS1', 'Theta_0')] = 1 def _loss__add_replacement_cost( - adf, - DL_method, - unit=None, - median=None, - distribution=None, - theta_1=None, -): + adf: pd.DataFrame, + dl_method: str, + unit: str | None = None, + median: float | None = None, + distribution: str | None = None, + theta_1: float | None = None, +) -> None: """ - Adds replacement cost information. + Add replacement cost information. 
Parameters ---------- @@ -1799,47 +1761,41 @@ def _loss__add_replacement_cost( """ rc = ('replacement', 'Cost') if median is not None: + # TODO(JVM): in this case we need unit (add config parser check) - # TODO: in this case we need unit (add config parser check) - - adf.loc[rc, ('Quantity', 'Unit')] = "1 EA" + adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' adf.loc[rc, ('DV', 'Unit')] = unit adf.loc[rc, ('DS1', 'Theta_0')] = median if distribution is not None: - - # TODO: in this case we need theta_1 (add config parser check) + # TODO(JVM): in this case we need theta_1 (add config parser check) adf.loc[rc, ('DS1', 'Family')] = distribution adf.loc[rc, ('DS1', 'Theta_1')] = theta_1 - else: - - # add a default replacement cost value as a placeholder - # the default value depends on the consequence database - - # for FEMA P-58, use 0 USD - if DL_method == 'FEMA P-58': - adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rc, ('DV', 'Unit')] = 'USD_2011' - adf.loc[rc, ('DS1', 'Theta_0')] = 0 + elif dl_method == 'FEMA P-58': + adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rc, ('DV', 'Unit')] = 'USD_2011' + adf.loc[rc, ('DS1', 'Theta_0')] = 0 - # for Hazus EQ and HU, use 1.0 as a loss_ratio - elif DL_method in {'Hazus Earthquake', 'Hazus Hurricane'}: - adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio' + # for Hazus EQ and HU, use 1.0 as a loss_ratio + elif dl_method in {'Hazus Earthquake', 'Hazus Hurricane'}: + adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio' - # store the replacement cost that corresponds to total loss - adf.loc[rc, ('DS1', 'Theta_0')] = 1.00 + # store the replacement cost that corresponds to total loss + adf.loc[rc, ('DS1', 'Theta_0')] = 1.00 - # otherwise, use 1 (and expect to have it defined by the user) - else: - adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' - adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio' - adf.loc[rc, ('DS1', 'Theta_0')] = 1 + # otherwise, use 1 (and expect 
to have it defined by the user) + else: + adf.loc[rc, ('Quantity', 'Unit')] = '1 EA' + adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio' + adf.loc[rc, ('DS1', 'Theta_0')] = 1 -def _loss__map_user(custom_model_dir, loss_map_path=None): +def _loss__map_user( + custom_model_dir: str, loss_map_path: str | None = None +) -> pd.DataFrame: """ Load a user-defined loss map from a specified path. @@ -1864,20 +1820,25 @@ def _loss__map_user(custom_model_dir, loss_map_path=None): """ if loss_map_path is not None: - loss_map_path = loss_map_path.replace('CustomDLDataFolder', custom_model_dir) else: - raise ValueError('Missing loss map path.') + msg = 'Missing loss map path.' + raise ValueError(msg) - loss_map = pd.read_csv(loss_map_path, index_col=0) + return pd.read_csv(loss_map_path, index_col=0) - return loss_map - -def _loss__map_auto(assessment, conseq_df, DL_method, occupancy_type=None): +def _loss__map_auto( + assessment: DLCalculationAssessment, + conseq_df: pd.DataFrame, + dl_method: str, + occupancy_type: str | None = None, +) -> pd.DataFrame: """ - Automatically generates a loss map based on the damage sample and + Automatically generate a loss map. + + Automatically generate a loss map based on the damage sample and the consequence database. 
Parameters @@ -1914,6 +1875,7 @@ def _loss__map_auto(assessment, conseq_df, DL_method, occupancy_type=None): """ # get the damage sample dmg_sample = assessment.damage.save_sample() + assert isinstance(dmg_sample, pd.DataFrame) # create a mapping for all components that are also in # the prescribed consequence database @@ -1923,7 +1885,7 @@ def _loss__map_auto(assessment, conseq_df, DL_method, occupancy_type=None): drivers = [] loss_models = [] - if DL_method in {'FEMA P-58', 'Hazus Hurricane'}: + if dl_method in {'FEMA P-58', 'Hazus Hurricane'}: # with these methods, we assume fragility and consequence data # have the same IDs @@ -1935,7 +1897,7 @@ def _loss__map_auto(assessment, conseq_df, DL_method, occupancy_type=None): drivers.append(f'DMG-{dmg_cmp}') loss_models.append(dmg_cmp) - elif DL_method in { + elif dl_method in { 'Hazus Earthquake', 'Hazus Earthquake Transportation', }: @@ -1955,13 +1917,8 @@ def _loss__map_auto(assessment, conseq_df, DL_method, occupancy_type=None): drivers.append(f'DMG-{dmg_cmp}') loss_models.append(loss_cmp) - loss_map = pd.DataFrame(loss_models, columns=['Repair'], index=drivers) - - return loss_map + return pd.DataFrame(loss_models, columns=['Repair'], index=drivers) class TimeBasedAssessment: - """ - Time-based assessment. - - """ + """Time-based assessment.""" diff --git a/pelicun/auto.py b/pelicun/auto.py index 810b8712a..1610a85e6 100644 --- a/pelicun/auto.py +++ b/pelicun/auto.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2023 Leland Stanford Junior University # Copyright (c) 2023 The Regents of the University of California @@ -37,43 +36,51 @@ # Contributors: # Adam Zsarnóczay -""" -This module has classes and methods that auto-populate DL models. 
-""" +"""Classes and methods that auto-populate DL models.""" from __future__ import annotations -import sys + import importlib +import sys from pathlib import Path +from typing import TYPE_CHECKING from pelicun import base +if TYPE_CHECKING: + import pandas as pd + def auto_populate( - config, auto_script_path, **kwargs # pylint: disable=unused-argument -): + config: dict, + auto_script_path: Path, + **kwargs, # noqa: ANN003 +) -> tuple[dict, pd.DataFrame]: """ - Automatically populates the Damage and Loss (DL) configuration for - a Pelicun calculation using predefined rules. + Auto populate the DL configuration with predefined rules. - This function modifies the provided configuration dictionary based - on an external Python script that defines auto-population - rules. It supports using built-in scripts or custom scripts - specified by the user. + Automatically populates the Damage and Loss (DL) configuration for + a Pelicun calculation using predefined rules. This function + modifies the provided configuration dictionary based on an + external Python script that defines auto-population rules. It + supports using built-in scripts or custom scripts specified by the + user. Parameters ---------- - config : dict + config: dict A configuration dictionary with a 'GeneralInformation' key that holds another dictionary with attributes of the asset of interest. This dictionary is modified in-place with auto-populated values. - auto_script_path : str + auto_script_path: str The path pointing to a Python script with the auto-population rules. Built-in scripts can be referenced using the 'PelicunDefault/XY' format where 'XY' is the name of the script. + kwargs + Keyword arguments. Returns ------- @@ -89,33 +96,37 @@ def auto_populate( ValueError If the configuration dictionary does not contain necessary asset information under 'GeneralInformation'. 
- """ + """ # try to get the AIM attributes - AIM = config.get('GeneralInformation', None) - if AIM is None: - raise ValueError( - "No Asset Information provided for the auto-population routine." - ) + aim = config.get('GeneralInformation') + if aim is None: + msg = 'No Asset Information provided for the auto-population routine.' + raise ValueError(msg) # replace default keyword with actual path in auto_script location - if 'PelicunDefault/' in auto_script_path: - auto_script_path = auto_script_path.replace( - 'PelicunDefault/', f'{base.pelicun_path}/resources/auto/' - ) + path_parts = Path(auto_script_path).resolve().parts + new_parts: list[str] = [ + (Path(base.pelicun_path) / 'resources/auto').resolve().absolute().as_posix() + if part == 'PelicunDefault' + else part + for part in path_parts + ] + if 'PelicunDefault' in path_parts: + auto_script_path = Path(*new_parts) # load the auto population module - ASP = Path(auto_script_path).resolve() - sys.path.insert(0, str(ASP.parent) + '/') - auto_script = importlib.__import__(ASP.name[:-3], globals(), locals(), [], 0) + asp = Path(auto_script_path).resolve() + sys.path.insert(0, str(asp.parent) + '/') + auto_script = importlib.__import__(asp.name[:-3], globals(), locals(), [], 0) auto_populate_ext = auto_script.auto_populate # generate the DL input data - AIM_ap, DL_ap, CMP = auto_populate_ext(AIM=config) + aim_ap, dl_ap, comp = auto_populate_ext(aim=config) # assemble the extended config - config['GeneralInformation'].update(AIM_ap) - config.update({'DL': DL_ap}) + config['GeneralInformation'].update(aim_ap) + config.update({'DL': dl_ap}) # return the extended config data and the component quantities - return config, CMP + return config, comp diff --git a/pelicun/base.py b/pelicun/base.py index d5a4a3603..93d4ac46d 100644 --- a/pelicun/base.py +++ b/pelicun/base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of 
California @@ -38,32 +37,31 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This module defines constants, basic classes and methods for pelicun. -""" +"""Constants, basic classes, and methods for pelicun.""" from __future__ import annotations -from typing import Any -from typing import TYPE_CHECKING -from collections.abc import Callable -import os -import sys -from datetime import datetime + +import argparse +import datetime import json +import pprint +import sys import warnings from pathlib import Path -import argparse -import pprint +from typing import TYPE_CHECKING, Any, Optional, TypeVar, overload + +import colorama import numpy as np -from scipy.interpolate import interp1d # type: ignore import pandas as pd -import colorama -from colorama import Fore -from colorama import Style -from pelicun.warnings import PelicunWarning +from colorama import Fore, Style +from scipy.interpolate import interp1d # type: ignore + +from pelicun.pelicun_warnings import PelicunWarning if TYPE_CHECKING: + from collections.abc import Callable + from pelicun.assessment import AssessmentBase @@ -79,10 +77,12 @@ idx = pd.IndexSlice +T = TypeVar('T') + + class Options: """ - Options objects store analysis options and the logging - configuration. + Analysis options and logging configuration. Attributes ---------- @@ -98,9 +98,10 @@ class Options: value some quantity of a given unit needs to be multiplied to be expressed in the base units). Value specified in the user configuration dictionary. Pelicun comes with a set of default - units which are always loaded (see settings/default_units.json - in the pelicun source code). Units specified in the units_file - overwrite the default units. + units which are always loaded (see + `settings/default_units.json` in the pelicun source + code). Units specified in the units_file overwrite the default + units. demand_offset: dict Demand offsets are used in the process of mapping a component location to its associated EDP. 
This allows components that @@ -148,27 +149,30 @@ class Options: __slots__ = [ '_asmnt', - 'defaults', - 'sampling_method', - 'list_all_ds', - '_seed', '_rng', - 'units_file', + '_seed', + 'defaults', 'demand_offset', - 'nondir_multi_dict', - 'rho_cost_time', 'eco_scale', + 'eco_scale', + 'error_setup', 'error_setup', + 'list_all_ds', 'log', + 'log', + 'nondir_multi_dict', + 'rho_cost_time', + 'sampling_method', + 'units_file', ] def __init__( self, user_config_options: dict[str, Any] | None, assessment: AssessmentBase | None = None, - ): + ) -> None: """ - Initializes an Options object. + Initialize an Options object. Parameters ---------- @@ -180,8 +184,8 @@ def __init__( object. If it is not intended to use this Options object for an Assessment (e.g. defining an Options object for UQ use), this value should be None. - """ + """ self._asmnt = assessment self.defaults: dict[str, Any] | None = None @@ -208,99 +212,96 @@ def __init__( # instantiate a Logger object with the finalized configuration self.log = Logger( - merged_config_options['Verbose'], - merged_config_options['LogShowMS'], merged_config_options['LogFile'], - merged_config_options['PrintLog'], + verbose=merged_config_options['Verbose'], + log_show_ms=merged_config_options['LogShowMS'], + print_log=merged_config_options['PrintLog'], ) @property def seed(self) -> float | None: """ - Seed property + Seed property. Returns ------- float Seed value + """ return self._seed @seed.setter def seed(self, value: float) -> None: - """ - seed property setter - """ + """Seed property setter.""" self._seed = value self._rng = np.random.default_rng(self._seed) # type: ignore @property def rng(self) -> np.random.Generator: """ - rng property + rng property. Returns ------- Generator Random generator + """ return self._rng class Logger: - """ - Logger objects are used to generate log files documenting - execution events and related messages. 
- - Attributes - ---------- - verbose: bool - If True, the pelicun echoes more information throughout the - assessment. This can be useful for debugging purposes. The - value is specified in the user's configuration dictionary, - otherwise left as provided in the default configuration file - (see settings/default_config.json in the pelicun source code). - log_show_ms: bool - If True, the timestamps in the log file are in microsecond - precision. The value is specified in the user's configuration - dictionary, otherwise left as provided in the default - configuration file (see settings/default_config.json in the - pelicun source code). - log_file: str, optional - If a value is provided, the log is written to that file. The - value is specified in the user's configuration dictionary, - otherwise left as provided in the default configuration file - (see settings/default_config.json in the pelicun source code). - print_log: bool - If True, the log is also printed to standard output. The - value is specified in the user's configuration dictionary, - otherwise left as provided in the default configuration file - (see settings/default_config.json in the pelicun source code). - - """ + """Generate log files documenting execution events.""" __slots__ = [ - 'verbose', - 'log_show_ms', - 'log_file', - 'warning_file', - 'print_log', - 'warning_stack', 'emitted', + 'log_div', + 'log_file', + 'log_show_ms', 'log_time_format', + 'print_log', 'spaces', - 'log_div', + 'verbose', + 'warning_file', + 'warning_stack', ] def __init__( - self, verbose: bool, log_show_ms: bool, log_file: str | None, print_log: bool - ): + self, + log_file: str | None, + *, + verbose: bool, + log_show_ms: bool, + print_log: bool, + ) -> None: """ - Initializes a Logger object. + Initialize a Logger object. Parameters ---------- - see attributes of the Logger class. + verbose: bool + If True, the pelicun echoes more information throughout the + assessment. This can be useful for debugging purposes. 
The + value is specified in the user's configuration dictionary, + otherwise left as provided in the default configuration file + (see settings/default_config.json in the pelicun source code). + log_show_ms: bool + If True, the timestamps in the log file are in microsecond + precision. The value is specified in the user's configuration + dictionary, otherwise left as provided in the default + configuration file (see settings/default_config.json in the + pelicun source code). + log_file: str, optional + If a value is provided, the log is written to that file. The + value is specified in the user's configuration dictionary, + otherwise left as provided in the default configuration file + (see settings/default_config.json in the pelicun source code). + print_log: bool + If True, the log is also printed to standard output. The + value is specified in the user's configuration dictionary, + otherwise left as provided in the default configuration file + (see settings/default_config.json in the pelicun source code). """ self.verbose = verbose @@ -310,25 +311,16 @@ def __init__( self.log_file = None self.warning_file = None else: - try: - path = Path(log_file) - self.log_file = str(path.resolve()) - name, extension = split_file_name(self.log_file) - self.warning_file = ( - path.parent / (name + '_warnings' + extension) - ).resolve() - with open(self.log_file, 'w', encoding='utf-8') as f: - f.write('') - with open(self.warning_file, 'w', encoding='utf-8') as f: - f.write('') - except BaseException as err: - print( - f"{Fore.RED}WARNING: The filepath provided for the log file " - f"does not point to a valid location: {log_file}. 
\nPelicun " - f"cannot print the log to a file.\n" - f"The error was: '{err}'{Style.RESET_ALL}" - ) - raise + path = Path(log_file) + self.log_file = str(path.resolve()) + name, extension = split_file_name(self.log_file) + self.warning_file = ( + path.parent / (name + '_warnings' + extension) + ).resolve() + with Path(self.log_file).open('w', encoding='utf-8') as f: + f.write('') + with Path(self.warning_file).open('w', encoding='utf-8') as f: + f.write('') self.print_log = str2bool(print_log) self.warning_stack: list[str] = [] @@ -337,10 +329,7 @@ def __init__( control_warnings() def reset_log_strings(self) -> None: - """ - Populates the string-related attributes of the logger - """ - + """Populate the string-related attributes of the logger.""" if self.log_show_ms: self.log_time_format = '%H:%M:%S:%f' # the length of the time string in the log file @@ -355,11 +344,12 @@ def reset_log_strings(self) -> None: def msg( self, msg: str = '', + *, prepend_timestamp: bool = True, prepend_blank_space: bool = True, ) -> None: """ - Writes a message in the log file with the current time as prefix + Write a message in the log file with the current time as prefix. The time is in ISO-8601 format, e.g. 2018-06-16T20:24:04Z @@ -373,35 +363,29 @@ def msg( Controls whether blank space is placed before the message. 
""" - - # pylint: disable = consider-using-f-string msg_lines = msg.split('\n') for msg_i, msg_line in enumerate(msg_lines): if prepend_timestamp and (msg_i == 0): - formatted_msg = '{} {}'.format( - datetime.now().strftime(self.log_time_format), msg_line - ) - elif prepend_timestamp: - formatted_msg = self.spaces + msg_line - elif prepend_blank_space: + formatted_msg = f'{datetime.datetime.now().strftime(self.log_time_format)} {msg_line}' # noqa: DTZ005 + elif prepend_timestamp or prepend_blank_space: formatted_msg = self.spaces + msg_line else: formatted_msg = msg_line if self.print_log: - print(formatted_msg) + print(formatted_msg) # noqa: T201 if self.log_file is not None: - with open(self.log_file, 'a', encoding='utf-8') as f: + with Path(self.log_file).open('a', encoding='utf-8') as f: f.write('\n' + formatted_msg) def add_warning(self, msg: str) -> None: """ - Adds a warning to the warning stack. + Add a warning to the warning stack. - Note - ---- + Notes + ----- Warnings are only emitted when `emit_warnings` is called. Parameters @@ -420,15 +404,12 @@ def add_warning(self, msg: str) -> None: self.warning_stack.append(formatted_msg) def emit_warnings(self) -> None: - """ - Issues all warnings and clears the warning stack. - - """ + """Issues all warnings and clears the warning stack.""" for message in self.warning_stack: if message not in self.emitted: warnings.warn(message, PelicunWarning, stacklevel=2) if self.warning_file is not None: - with open(self.warning_file, 'a', encoding='utf-8') as f: + with Path(self.warning_file).open('a', encoding='utf-8') as f: f.write( message.replace(Fore.RED, '') .replace(Style.RESET_ALL, '') @@ -438,7 +419,7 @@ def emit_warnings(self) -> None: self.emitted = self.emitted.union(set(self.warning_stack)) self.warning_stack = [] - def warn(self, msg: str) -> None: + def warning(self, msg: str) -> None: """ Add an emit a warning immediately. 
@@ -451,28 +432,20 @@ def warn(self, msg: str) -> None: self.add_warning(msg) self.emit_warnings() - def div(self, prepend_timestamp: bool = False) -> None: - """ - Adds a divider line in the log file - """ - - if prepend_timestamp: - msg = self.log_div - else: - msg = '-' * 80 + def div(self, *, prepend_timestamp: bool = False) -> None: + """Add a divider line in the log file.""" + msg = self.log_div if prepend_timestamp else '-' * 80 self.msg(msg, prepend_timestamp=prepend_timestamp) def print_system_info(self) -> None: - """ - Writes system information in the log. - """ - + """Write system information in the log.""" self.msg( 'System Information:', prepend_timestamp=False, prepend_blank_space=False ) + start = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') # noqa: DTZ005 self.msg( - f'local time zone: {datetime.utcnow().astimezone().tzinfo}\n' - f'start time: {datetime.now().strftime("%Y-%m-%dT%H:%M:%S")}\n' + f'local time zone: {datetime.datetime.utcnow().astimezone().tzinfo}\n' + f'start time: {start}\n' f'python: {sys.version}\n' f'numpy: {np.__version__}\n' f'pandas: {pd.__version__}\n', @@ -481,11 +454,13 @@ def print_system_info(self) -> None: # get the absolute path of the pelicun directory -pelicun_path = Path(os.path.dirname(os.path.abspath(__file__))) +pelicun_path = Path(__file__).resolve().parent def split_file_name(file_path: str) -> tuple[str, str]: """ + Separate a file name from the extension. + Separates a file name from the extension accounting for the case where the file name itself contains periods. @@ -511,14 +486,13 @@ def split_file_name(file_path: str) -> tuple[str, str]: def control_warnings() -> None: """ - Convenience function to turn warnings on/off + Turn warnings on/off. See also: `pelicun/pytest.ini`. Devs: make sure to update that file when addressing & eliminating warnings. """ if not sys.warnoptions: - # Here we specify *specific* warnings to ignore. 
# 'message' -- a regex that the warning message must match @@ -526,48 +500,49 @@ def control_warnings() -> None: # and plan to address them soon. warnings.filterwarnings( - action='ignore', message=".*Use to_numeric without passing `errors`.*" + action='ignore', message='.*Use to_numeric without passing `errors`.*' ) warnings.filterwarnings( action='ignore', message=".*errors='ignore' is deprecated.*" ) warnings.filterwarnings( action='ignore', - message=".*The previous implementation of stack is deprecated.*", + message='.*The previous implementation of stack is deprecated.*', ) warnings.filterwarnings( action='ignore', - message=".*Setting an item of incompatible dtype is deprecated.*", + message='.*Setting an item of incompatible dtype is deprecated.*', ) warnings.filterwarnings( action='ignore', - message=".*DataFrame.groupby with axis=1 is deprecated.*", + message='.*DataFrame.groupby with axis=1 is deprecated.*', ) def load_default_options() -> dict: """ - Load the default_config.json file to set options to default values + Load the default_config.json file to set options to default values. Returns ------- dict Default options - """ - with open( - pelicun_path / "settings/default_config.json", 'r', encoding='utf-8' + """ + with Path(pelicun_path / 'settings/default_config.json').open( + encoding='utf-8' ) as f: default_config = json.load(f) - default_options = default_config['Options'] - return default_options + return default_config['Options'] def update_vals( update_value: dict, primary: dict, update_path: str, primary_path: str ) -> None: """ + Transfer values between nested dictionaries. + Updates the values of the `update` nested dictionary with those provided in the `primary` nested dictionary. If a key already exists in update, and does not map to another @@ -596,10 +571,10 @@ def update_vals( If primary[key] is dict but update[key] is not. ValueError If update[key] is dict but primary[key] is not. 
- """ + """ # we go over the keys of `primary` - for key in primary: + for key in primary: # noqa: PLC0206 # if `primary[key]` is a dictionary: if isinstance(primary[key], dict): # if the same `key` does not exist in update, @@ -609,7 +584,7 @@ def update_vals( # if it exists already, it should map to # a dictionary. elif not isinstance(update_value[key], dict): - raise ValueError( + msg = ( f'{update_path}["{key}"] ' 'should map to a dictionary. ' 'The specified value is ' @@ -618,6 +593,7 @@ def update_vals( f'{primary_path}["{key}"] = {primary[key]}. ' f'Please revise {update_path}["{key}"].' ) + raise ValueError(msg) # With both being dictionaries, we use recursion. update_vals( update_value[key], @@ -626,29 +602,25 @@ def update_vals( f'{primary_path}["{key}"]', ) # if `primary[key]` is NOT a dictionary: - else: - # if `key` does not exist in `update`, we add it, with - # its corresponding value. - if key not in update_value: - update_value[key] = primary[key] - else: - # key exists in update and should be left alone, - # but we must check that it's not a dict here: - if isinstance(update_value[key], dict): - raise ValueError( - f'{update_path}["{key}"] ' - 'should not map to a dictionary. ' - f'The specified value is ' - f'{update_path}["{key}"] = {update_value[key]}, but ' - f'the default value is ' - f'{primary_path}["{key}"] = {primary[key]}. ' - f'Please revise {update_path}["{key}"].' - ) - # pylint: enable=else-if-used + elif key not in update_value: + update_value[key] = primary[key] + elif isinstance(update_value[key], dict): + msg = ( + f'{update_path}["{key}"] ' + 'should not map to a dictionary. ' + f'The specified value is ' + f'{update_path}["{key}"] = {update_value[key]}, but ' + f'the default value is ' + f'{primary_path}["{key}"] = {primary[key]}. ' + f'Please revise {update_path}["{key}"].' + ) + raise ValueError(msg) def merge_default_config(user_config: dict | None) -> dict: """ + Merge default config with user's options. 
+ Merge the user-specified config with the configuration defined in the default_config.json file. If the user-specified config does not include some option available in the default options, then the @@ -663,8 +635,8 @@ def merge_default_config(user_config: dict | None) -> dict: ------- dict Merged configuration dictionary - """ + """ config = user_config # start from the user's config default_config = load_default_options() @@ -679,11 +651,28 @@ def merge_default_config(user_config: dict | None) -> dict: return config +# https://stackoverflow.com/questions/52445559/ +# how-can-i-type-hint-a-function-where-the- +# return-type-depends-on-the-input-type-o + + +@overload def convert_to_SimpleIndex( - data: pd.DataFrame, axis: int = 0, inplace: bool = False -) -> pd.DataFrame: + data: pd.DataFrame, axis: int = 0, *, inplace: bool = False +) -> pd.DataFrame: ... + + +@overload +def convert_to_SimpleIndex( + data: pd.Series, axis: int = 0, *, inplace: bool = False +) -> pd.Series: ... + + +def convert_to_SimpleIndex( # noqa: N802 + data: pd.DataFrame | pd.Series, axis: int = 0, *, inplace: bool = False +) -> pd.DataFrame | pd.Series: """ - Converts the index of a DataFrame to a simple, one-level index + Convert the index of a DataFrame to a simple, one-level index. 
The target index uses standard SimCenter convention to identify different levels: a dash character ('-') is used to separate each @@ -709,22 +698,19 @@ def convert_to_SimpleIndex( ------ ValueError When an invalid axis parameter is specified - """ + """ if axis in {0, 1}: - if inplace: - data_mod = data - else: - data_mod = data.copy() + data_mod = data if inplace else data.copy() if axis == 0: # only perform this if there are multiple levels if data.index.nlevels > 1: simple_name = '-'.join( - [n if n is not None else "" for n in data.index.names] + [n if n is not None else '' for n in data.index.names] ) simple_index = [ - '-'.join([str(id_i) for id_i in id]) for id in data.index + '-'.join([str(id_i) for id_i in idx]) for idx in data.index ] data_mod.index = pd.Index(simple_index, name=simple_name) @@ -734,26 +720,39 @@ def convert_to_SimpleIndex( # only perform this if there are multiple levels if data.columns.nlevels > 1: simple_name = '-'.join( - [n if n is not None else "" for n in data.columns.names] + [n if n is not None else '' for n in data.columns.names] ) simple_index = [ - '-'.join([str(id_i) for id_i in id]) for id in data.columns + '-'.join([str(id_i) for id_i in idx]) for idx in data.columns ] data_mod.columns = pd.Index(simple_index, name=simple_name) data_mod.columns.name = simple_name else: - raise ValueError(f"Invalid axis parameter: {axis}") + msg = f'Invalid axis parameter: {axis}' + raise ValueError(msg) return data_mod +@overload +def convert_to_MultiIndex( + data: pd.DataFrame, axis: int = 0, *, inplace: bool = False +) -> pd.DataFrame: ... + + +@overload def convert_to_MultiIndex( - data: pd.DataFrame, axis: int = 0, inplace: bool = False + data: pd.Series, axis: int = 0, *, inplace: bool = False +) -> pd.Series: ... 
+ + +def convert_to_MultiIndex( # noqa: N802 + data: pd.DataFrame | pd.Series, axis: int = 0, *, inplace: bool = False ) -> pd.DataFrame | pd.Series: """ - Converts the index of a DataFrame to a MultiIndex + Convert the index of a DataFrame to a MultiIndex. We assume that the index uses standard SimCenter convention to identify different levels: a dash character ('-') is expected to @@ -779,8 +778,8 @@ def convert_to_MultiIndex( ------ ValueError If an invalid axis is specified. - """ + """ # check if the requested axis is already a MultiIndex if ((axis == 0) and (isinstance(data.index, pd.MultiIndex))) or ( (axis == 1) and (isinstance(data.columns, pd.MultiIndex)) @@ -795,24 +794,20 @@ def convert_to_MultiIndex( index_labels = [str(label).split('-') for label in data.columns] else: - raise ValueError(f"Invalid axis parameter: {axis}") + msg = f'Invalid axis parameter: {axis}' + raise ValueError(msg) max_lbl_len = np.max([len(labels) for labels in index_labels]) for l_i, labels in enumerate(index_labels): if len(labels) != max_lbl_len: - labels += [ - '', - ] * (max_lbl_len - len(labels)) + labels += [''] * (max_lbl_len - len(labels)) # noqa: PLW2901 index_labels[l_i] = labels index_labels_np = np.array(index_labels) if index_labels_np.shape[1] > 1: - if inplace: - data_mod = data - else: - data_mod = data.copy() + data_mod = data if inplace else data.copy() if axis == 0: data_mod.index = pd.MultiIndex.from_arrays(index_labels_np.T) @@ -827,9 +822,10 @@ def convert_to_MultiIndex( def convert_dtypes(dataframe: pd.DataFrame) -> pd.DataFrame: """ - Convert columns to a numeric datatype whenever possible. The - function replaces None with NA otherwise columns containing None - would continue to have the `object` type + Convert columns to a numeric datatype whenever possible. + + The function replaces None with NA otherwise columns containing + None would continue to have the `object` type. 
Parameters ---------- @@ -842,7 +838,7 @@ def convert_dtypes(dataframe: pd.DataFrame) -> pd.DataFrame: The modified DataFrame. """ - dataframe.fillna(value=np.nan, inplace=True) + dataframe = dataframe.fillna(value=np.nan) # note: `axis=0` applies the function to the columns # note: ignoring errors is a bad idea and should never be done. In # this case, however, that's not what we do, despite the name of @@ -852,20 +848,23 @@ def convert_dtypes(dataframe: pd.DataFrame) -> pd.DataFrame: # See: # https://pandas.pydata.org/docs/reference/api/pandas.to_numeric.html return dataframe.apply( - lambda x: pd.to_numeric(x, errors='ignore'), axis=0 # type:ignore + lambda x: pd.to_numeric(x, errors='ignore'), # type:ignore + axis=0, ) -def show_matrix(data, use_describe=False): +def show_matrix( + data: np.ndarray | pd.DataFrame, *, use_describe: bool = False +) -> None: """ Print a matrix in a nice way using a DataFrame. Parameters ---------- - data : array-like + data: array-like The matrix data to display. Can be any array-like structure that pandas can convert to a DataFrame. - use_describe : bool, default: False + use_describe: bool, default: False If True, provides a descriptive statistical summary of the matrix including specified percentiles. If False, simply prints the matrix as is. @@ -884,10 +883,13 @@ def multiply_factor_multiple_levels( conditions: dict, factor: float, axis: int = 0, + *, raise_missing: bool = True, ) -> None: """ - Multiply a value to selected rows of a DataFrame that is indexed + Multiply a value to selected rows, in place. + + Multiplies a value to selected rows of a DataFrame that is indexed with a hierarchical index (pd.MultiIndex). The change is done in place. @@ -919,23 +921,23 @@ def multiply_factor_multiple_levels( is True. 
""" - if axis == 0: idx_to_use = df.index elif axis == 1: idx_to_use = df.columns else: - raise ValueError(f'Invalid axis: `{axis}`') + msg = f'Invalid axis: `{axis}`' + raise ValueError(msg) - mask = pd.Series(True, index=idx_to_use) + mask = pd.Series(data=True, index=idx_to_use) # Apply each condition to update the mask for level, value in conditions.items(): mask &= idx_to_use.get_level_values(level) == value - # pylint: disable=singleton-comparison - if np.all(mask == False) and raise_missing: # noqa - raise ValueError(f'No rows found matching the conditions: `{conditions}`') + if np.all(mask == False) and raise_missing: # noqa: E712 + msg = f'No rows found matching the conditions: `{conditions}`' + raise ValueError(msg) if axis == 0: df.iloc[mask.to_numpy()] *= factor @@ -948,10 +950,12 @@ def _warning( category: type[Warning], filename: str, lineno: int, - file: Any = None, - line: Any = None, + file: Any = None, # noqa: ARG001, ANN401 + line: Any = None, # noqa: ARG001, ANN401 ) -> None: """ + Display warnings in a custom format. + Custom warning function to format and print warnings more attractively. This function modifies how warning messages are displayed, emphasizing the file path and line number from where @@ -959,22 +963,23 @@ def _warning( Parameters ---------- - message : str + message: str The warning message to be displayed. - category : Warning + category: Warning The category of the warning (unused, but required for compatibility with standard warning signature). - filename : str + filename: str The path of the file from which the warning is issued. The function simplifies the path for display. - lineno : int + lineno: int The line number in the file at which the warning is issued. - file : file-like object, optional + file: file-like object, optional The target file object to write the warning to (unused, but required for compatibility with standard warning signature). 
- line : str, optional + line: str, optional Line of code causing the warning (unused, but required for compatibility with standard warning signature). + """ # pylint:disable = unused-argument if category != PelicunWarning: @@ -985,21 +990,18 @@ def _warning( else: file_path = None - if file_path is not None: - python_file = '/'.join(file_path[-3:]) - else: - python_file = filename - print(f'WARNING in {python_file} at line {lineno}\n{message}\n') + python_file = '/'.join(file_path[-3:]) if file_path is not None else filename + print(f'WARNING in {python_file} at line {lineno}\n{message}\n') # noqa: T201 else: - print(message) + print(message) # noqa: T201 warnings.showwarning = _warning # type: ignore def describe( - df, - percentiles=( + data: pd.DataFrame | pd.Series | np.ndarray, + percentiles: tuple[float, ...] = ( 0.001, 0.023, 0.10, @@ -1010,11 +1012,12 @@ def describe( 0.977, 0.999, ), -): +) -> pd.DataFrame: """ + Extend descriptive statistics. + Provides extended descriptive statistics for given data, including percentiles and log standard deviation for applicable columns. - This function accepts both pandas Series and DataFrame objects directly, or any array-like structure which can be converted to them. It calculates common descriptive statistics and optionally @@ -1023,10 +1026,10 @@ def describe( Parameters ---------- - df : pd.Series, pd.DataFrame, or array-like + data: pd.Series, pd.DataFrame, or array-like The data to describe. If array-like, it is converted to a DataFrame or Series before analysis. - percentiles : tuple of float, optional + percentiles: tuple of float, optional Specific percentiles to include in the output. Default includes an extensive range tailored to provide a detailed summary. @@ -1036,37 +1039,37 @@ def describe( pd.DataFrame A DataFrame containing the descriptive statistics of the input data, transposed so that each descriptive statistic is a row. 
+ """ - if not isinstance(df, (pd.Series, pd.DataFrame)): - vals = df - cols = np.arange(vals.shape[1]) if vals.ndim > 1 else 0 + if isinstance(data, np.ndarray): + vals = data if vals.ndim == 1: - df = pd.Series(vals, name=cols) + data = pd.Series(vals, name=0) else: - df = pd.DataFrame(vals, columns=cols) + cols = np.arange(vals.shape[1]) + data = pd.DataFrame(vals, columns=cols) - # convert Series into a DataFrame - if isinstance(df, pd.Series): - df = pd.DataFrame(df) + # convert Series to a DataFrame + if isinstance(data, pd.Series): + data = pd.DataFrame(data) - desc = df.describe(list(percentiles)).T + desc = pd.DataFrame(data.describe(list(percentiles)).T) # add log standard deviation to the stats - desc.insert(3, "log_std", np.nan) + desc.insert(3, 'log_std', np.nan) desc = desc.T for col in desc.columns: - if np.min(df[col]) > 0.0: - desc.loc['log_std', col] = np.std(np.log(df[col]), ddof=1) + if np.min(data[col]) > 0.0: + desc.loc['log_std', col] = np.std(np.log(data[col]), ddof=1) return desc -def str2bool(v: str | bool) -> bool: +def str2bool(v: str | bool) -> bool: # noqa: FBT001 """ - Converts a string representation of truth to boolean True or - False. + Convert a string representation of truth to boolean True or False. This function is designed to convert string inputs that represent boolean values into actual Python boolean types. It handles @@ -1075,7 +1078,7 @@ def str2bool(v: str | bool) -> bool: Parameters ---------- - v : str or bool + v: str or bool The value to convert into a boolean. This can be a boolean itself (in which case it is simply returned) or a string that is expected to represent a boolean value. @@ -1091,6 +1094,7 @@ def str2bool(v: str | bool) -> bool: If `v` is a string that does not correspond to a boolean value, an error is raised indicating that a boolean value was expected. 
+ """ # courtesy of Maxim @ Stackoverflow @@ -1100,13 +1104,13 @@ def str2bool(v: str | bool) -> bool: return True if v.lower() in {'no', 'false', 'False', 'f', 'n', '0'}: return False - raise argparse.ArgumentTypeError('Boolean value expected.') + msg = 'Boolean value expected.' + raise argparse.ArgumentTypeError(msg) -def float_or_None(string: str) -> float | None: +def float_or_None(string: str) -> float | None: # noqa: N802 """ - This is a convenience function for converting strings to float or - None + Convert strings to float or None. Parameters ---------- @@ -1118,18 +1122,17 @@ def float_or_None(string: str) -> float | None: float or None A float, if the given string can be converted to a float. Otherwise, it returns None + """ try: - res = float(string) - return res + return float(string) except ValueError: return None -def int_or_None(string: str) -> int | None: +def int_or_None(string: str) -> int | None: # noqa: N802 """ - This is a convenience function for converting strings to int or - None + Convert strings to int or None. Parameters ---------- @@ -1141,16 +1144,18 @@ def int_or_None(string: str) -> int | None: int or None An int, if the given string can be converted to an int. Otherwise, it returns None + """ try: - res = int(string) - return res + return int(string) except ValueError: return None def with_parsed_str_na_values(df: pd.DataFrame) -> pd.DataFrame: """ + Identify string values interpretable as N/A. + Given a dataframe, this function identifies values that have string type and can be interpreted as N/A, and replaces them with actual NA's. @@ -1199,6 +1204,8 @@ def with_parsed_str_na_values(df: pd.DataFrame) -> pd.DataFrame: def dedupe_index(dataframe: pd.DataFrame, dtype: type = str) -> pd.DataFrame: """ + Add a `uid` level to the index. + Modifies the index of a DataFrame to ensure all index elements are unique by adding an extra level. Assumes that the DataFrame's original index is a MultiIndex with specified names. 
A unique @@ -1208,24 +1215,24 @@ def dedupe_index(dataframe: pd.DataFrame, dtype: type = str) -> pd.DataFrame: Parameters ---------- - dataframe : pd.DataFrame + dataframe: pd.DataFrame The DataFrame whose index is to be modified. It must have a MultiIndex. - dtype : type, optional + dtype: type, optional The data type for the new index level 'uid'. Defaults to str. Returns ------- - dataframe - The DataFrame with a modified index. + dataframe: pd.DataFrame + The original dataframe with an additional `uid` level at the + index. """ inames = dataframe.index.names dataframe = dataframe.reset_index() dataframe['uid'] = (dataframe.groupby([*inames]).cumcount()).astype(dtype) - dataframe = dataframe.set_index([*inames] + ['uid']) - dataframe = dataframe.sort_index() - return dataframe + dataframe = dataframe.set_index([*inames, 'uid']) + return dataframe.sort_index() # Input specs @@ -1270,16 +1277,17 @@ def dedupe_index(dataframe: pd.DataFrame, dtype: type = str) -> pd.DataFrame: def dict_raise_on_duplicates(ordered_pairs: list[tuple]) -> dict: """ + Construct a dictionary from a list of key-value pairs. + Constructs a dictionary from a list of key-value pairs, raising an exception if duplicate keys are found. - This function ensures that no two pairs have the same key. It is particularly useful when parsing JSON-like data where unique keys are expected but not enforced by standard parsing methods. Parameters ---------- - ordered_pairs : list of tuples + ordered_pairs: list of tuples A list of tuples, each containing a key and a value. Keys are expected to be unique across the list. @@ -1306,18 +1314,19 @@ def dict_raise_on_duplicates(ordered_pairs: list[tuple]) -> dict: ----- This implementation is useful for contexts in which data integrity is crucial and key uniqueness must be ensured. 
- """ + """ d = {} for k, v in ordered_pairs: if k in d: - raise ValueError(f"duplicate key: {k}") + msg = f'duplicate key: {k}' + raise ValueError(msg) d[k] = v return d -def parse_units( - custom_file: str | None = None, preserve_categories: bool = False +def parse_units( # noqa: C901 + custom_file: str | None = None, *, preserve_categories: bool = False ) -> dict: """ Parse the unit conversion factor JSON file and return a dictionary. @@ -1327,6 +1336,12 @@ def parse_units( custom_file: str, optional If a custom file is provided, only the units specified in the custom file are used. + preserve_categories: bool, optional + If True, maintains the original data types of category + values from the JSON file. If False, converts all values + to floats and flattens the dictionary structure, ensuring + that each unit name is globally unique across categories. + Returns ------- @@ -1338,20 +1353,12 @@ def parse_units( `preserve_categories` is False, the dictionary is flattened to have globally unique unit names. - Raises - ------ - KeyError - If a key is defined twice. - ValueError - If a unit conversion factor is not a float. - FileNotFoundError - If a file does not exist. - Exception - If a file does not have the JSON format. """ - def get_contents(file_path, preserve_categories=False): + def get_contents(file_path: Path, *, preserve_categories: bool = False) -> dict: # noqa: C901 """ + Map unit names to conversion factors. + Parses a unit conversion factors JSON file and returns a dictionary mapping unit names to conversion factors. @@ -1364,10 +1371,10 @@ def get_contents(file_path, preserve_categories=False): Parameters ---------- - file_path : str + file_path: str The file path to a JSON file containing unit conversion factors. If not provided, a default file is used. - preserve_categories : bool, optional + preserve_categories: bool, optional If True, maintains the original data types of category values from the JSON file. 
If False, converts all values to floats and flattens the dictionary structure, ensuring @@ -1386,10 +1393,9 @@ def get_contents(file_path, preserve_categories=False): FileNotFoundError If the specified file does not exist. ValueError - If a unit name is duplicated, a conversion factor is not a - float, or other JSON structure issues are present. - json.decoder.JSONDecodeError - If the file is not a valid JSON file. + If a unit name is duplicated or other JSON structure issues are present. + TypeError + If a conversion factor is not a float. TypeError If any value that needs to be converted to float cannot be converted. @@ -1401,30 +1407,35 @@ def get_contents(file_path, preserve_categories=False): >>> parse_units('custom_units.json', preserve_categories=True) { 'Length': {'m': 1.0, 'cm': 0.01, 'mm': 0.001} } + """ try: - with open(file_path, 'r', encoding='utf-8') as f: + with Path(file_path).open(encoding='utf-8') as f: dictionary = json.load(f, object_pairs_hook=dict_raise_on_duplicates) except FileNotFoundError as exc: - raise FileNotFoundError(f'{file_path} was not found.') from exc + msg = f'{file_path} was not found.' + raise FileNotFoundError(msg) from exc except json.decoder.JSONDecodeError as exc: - raise ValueError(f'{file_path} is not a valid JSON file.') from exc + msg = f'{file_path} is not a valid JSON file.' 
+ raise ValueError(msg) from exc for category_dict in list(dictionary.values()): # ensure all first-level keys point to a dictionary if not isinstance(category_dict, dict): - raise ValueError( + msg = ( f'{file_path} contains first-level keys ' - 'that don\'t point to a dictionary' + "that don't point to a dictionary" ) + raise TypeError(msg) # convert values to float - for key, val in category_dict.items(): - try: + try: + for key, val in category_dict.items(): category_dict[key] = float(val) - except (ValueError, TypeError) as exc: - raise type(exc)( - f'Unit {key} has a value of {val} ' - 'which cannot be interpreted as a float' - ) from exc + except (ValueError, TypeError) as exc: + msg = ( + f'Unit {key} has a value of {val} ' + 'which cannot be interpreted as a float' + ) + raise type(exc)(msg) from exc if preserve_categories: return dictionary @@ -1433,27 +1444,31 @@ def get_contents(file_path, preserve_categories=False): for category in dictionary: for unit_name, factor in dictionary[category].items(): if unit_name in flattened: - raise ValueError(f'{unit_name} defined twice in {file_path}.') + msg = f'{unit_name} defined twice in {file_path}.' + raise ValueError(msg) flattened[unit_name] = factor return flattened if custom_file: - return get_contents(custom_file, preserve_categories) + return get_contents( + Path(custom_file), preserve_categories=preserve_categories + ) return get_contents( - pelicun_path / "settings/default_units.json", preserve_categories + pelicun_path / 'settings/default_units.json', + preserve_categories=preserve_categories, ) -def convert_units( +def convert_units( # noqa: C901 values: float | list[float] | np.ndarray, unit: str, to_unit: str, category: str | None = None, ) -> float | list[float] | np.ndarray: """ - Converts numeric values between different units. + Convert numeric values between different units. Supports conversion within a specified category of units and automatically infers the category if not explicitly provided. 
It @@ -1461,13 +1476,13 @@ def convert_units( Parameters ---------- - values (float | list[float] | np.ndarray): + values: (float | list[float] | np.ndarray) The numeric value(s) to convert. - unit (str): + unit: (str) The current unit of the values. - to_unit (str): + to_unit: (str) The target unit to convert the values into. - category (Optional[str]): + category: (Optional[str]) The category of the units (e.g., 'length', 'pressure'). If not provided, the category will be inferred based on the provided units. @@ -1488,13 +1503,13 @@ def convert_units( and `to_unit` are not in the same category. """ - if isinstance(values, (float, list)): vals = np.atleast_1d(values) elif isinstance(values, np.ndarray): vals = values else: - raise TypeError('Invalid input type for `values`') + msg = 'Invalid input type for `values`' + raise TypeError(msg) # load default units all_units = parse_units(preserve_categories=True) @@ -1502,11 +1517,13 @@ def convert_units( # if a category is given use it, otherwise try to determine it if category: if category not in all_units: - raise ValueError(f'Unknown category: `{category}`') + msg = f'Unknown category: `{category}`' + raise ValueError(msg) units = all_units[category] for unt in unit, to_unit: if unt not in units: - raise ValueError(f'Unknown unit: `{unt}`') + msg = f'Unknown unit: `{unt}`' + raise ValueError(msg) else: unit_category: str | None = None for key in all_units: @@ -1515,13 +1532,15 @@ def convert_units( unit_category = key break if not unit_category: - raise ValueError(f'Unknown unit `{unit}`') + msg = f'Unknown unit `{unit}`' + raise ValueError(msg) units = all_units[unit_category] if to_unit not in units: - raise ValueError( + msg = ( f'`{unit}` is a `{unit_category}` unit, but `{to_unit}` ' f'is not specified in that category.' 
) + raise ValueError(msg) # convert units from_factor = units[unit] @@ -1541,6 +1560,8 @@ def stringterpolation( arguments: str, ) -> Callable[[np.ndarray], np.ndarray]: """ + Linear interpolation from strings. + Turns a string of specially formatted arguments into a multilinear interpolating function. @@ -1573,7 +1594,7 @@ def invert_mapping(original_dict: dict) -> dict: Parameters ---------- - original_dict : dict + original_dict: dict Dictionary with values that are lists of hashable items. Returns @@ -1594,23 +1615,30 @@ def invert_mapping(original_dict: dict) -> dict: for key, value_list in original_dict.items(): for value in value_list: if value in inverted_dict: - raise ValueError('Cannot invert mapping with duplicate values.') + msg = 'Cannot invert mapping with duplicate values.' + raise ValueError(msg) inverted_dict[value] = key return inverted_dict -def get(d: dict | None, path: str, default: Any | None = None) -> Any: +def get( + d: dict | None, + path: str, + default: Any | None = None, # noqa: ANN401 +) -> Any: # noqa: ANN401 """ - Retrieve a value from a nested dictionary using a path with '/' as - the separator. + Path-like dictionary value retrieval. + + Retrieves a value from a nested dictionary using a path with '/' + as the separator. Parameters ---------- - d : dict + d: dict The dictionary to search. - path : str + path: str The path to the desired value, with keys separated by '/'. - default : Any, optional + default: Any, optional The value to return if the path is not found. Defaults to None. 
@@ -1644,26 +1672,30 @@ def get(d: dict | None, path: str, default: Any | None = None) -> Any: try: for key in keys: current_dict = current_dict[key] - return current_dict + return current_dict # noqa: TRY300 except (KeyError, TypeError): return default def update( - d: dict[str, Any], path: str, value: Any, only_if_empty_or_none: bool = False + d: dict[str, Any], + path: str, + value: Any, # noqa: ANN401 + *, + only_if_empty_or_none: bool = False, ) -> None: """ Set a value in a nested dictionary using a path with '/' as the separator. Parameters ---------- - d : dict + d: dict The dictionary to update. - path : str + path: str The path to the desired value, with keys separated by '/'. - value : Any + value: Any The value to set at the specified path. - only_if_empty_or_none : bool, optional + only_if_empty_or_none: bool, optional If True, only update the value if it is None or an empty dictionary. Defaults to False. @@ -1681,8 +1713,8 @@ def update( >>> update(d, 'x/y/z', 2) >>> d {'x': {'y': {'z': 2}}} # value is updated to 2 - """ + """ keys = path.strip('/').split('/') current_dict = d for key in keys[:-1]: @@ -1698,14 +1730,16 @@ def update( def is_unspecified(d: dict[str, Any], path: str) -> bool: """ - Check if a value in a nested dictionary is either non-existent, + Check if something is specified. + + Checks if a value in a nested dictionary is either non-existent, None, NaN, or an empty dictionary or list. Parameters ---------- - d : dict + d: dict The dictionary to search. - path : str + path: str The path to the desired value, with keys separated by '/'. Returns @@ -1741,9 +1775,7 @@ def is_unspecified(d: dict[str, Any], path: str) -> bool: return True if value == {}: return True - if value == []: - return True - return False + return value == [] def is_specified(d: dict[str, Any], path: str) -> bool: @@ -1752,9 +1784,9 @@ def is_specified(d: dict[str, Any], path: str) -> bool: Parameters ---------- - d : dict + d: dict The dictionary to search. 
- path : str + path: str The path to the desired value, with keys separated by '/'. Returns @@ -1764,3 +1796,32 @@ def is_specified(d: dict[str, Any], path: str) -> bool: """ return not is_unspecified(d, path) + + +def ensure_value(value: T | None) -> T: + """ + Ensure a variable is not None. + + This function checks that the provided variable is not None. It is + used to assist with type hinting by avoiding repetitive `assert + value is not None` statements throughout the code. + + Parameters + ---------- + value : Optional[T] + The variable to check, which can be of any type or None. + + Returns + ------- + T + The same variable, guaranteed to be non-None. + + Raises + ------ + TypeError + If the provided variable is None. + + """ + if value is None: + raise TypeError + return value diff --git a/pelicun/file_io.py b/pelicun/file_io.py index 263d6ebff..67012a321 100644 --- a/pelicun/file_io.py +++ b/pelicun/file_io.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -40,17 +39,16 @@ # Kuanshi Zhong # John Vouvakis Manousakis -""" -This module has classes and methods that handle file input and output. - -""" +"""Classes and methods that handle file input and output.""" from __future__ import annotations + from pathlib import Path + import numpy as np import pandas as pd -from pelicun import base +from pelicun import base convert_dv_name = { 'DV_rec_cost': 'Reconstruction Cost', @@ -83,17 +81,18 @@ } -def save_to_csv( +def save_to_csv( # noqa: C901 data: pd.DataFrame | None, - filepath: str | None, + filepath: Path | None, units: pd.Series | None = None, unit_conversion_factors: dict | None = None, orientation: int = 0, + *, use_simpleindex: bool = True, log: base.Logger | None = None, ) -> pd.DataFrame | None: """ - Saves data to a CSV file following the standard SimCenter schema. + Save data to a CSV file following the standard SimCenter schema. 
The produced CSV files have a single header line and an index column. The second line may start with 'Units' in the index or the @@ -102,25 +101,25 @@ def save_to_csv( Parameters ---------- - data : DataFrame + data: DataFrame The data to save. - filepath : str + filepath: Path The location of the destination file. If None, the data is not saved, but returned in the end. - units : Series, optional + units: Series, optional Provides a Series with variables and corresponding units. - unit_conversion_factors : dict, optional + unit_conversion_factors: dict, optional Dictionary containing key-value pairs of unit names and their corresponding factors. Conversion factors are defined as the number of times a base unit fits in the alternative unit. - orientation : int, {0, 1}, default 0 + orientation: int, {0, 1}, default 0 If 0, variables are organized along columns; otherwise, they are along the rows. This is important when converting values to follow the prescribed units. - use_simpleindex : bool, default True + use_simpleindex: bool, default True If True, MultiIndex columns and indexes are converted to SimpleIndex before saving. - log : Logger, optional + log: Logger, optional Logger object to be used. If no object is specified, no logging is performed. @@ -139,45 +138,46 @@ def save_to_csv( If `filepath` is None, returns the DataFrame with potential unit conversions and reformatting applied. Otherwise, returns None after saving the data to a CSV file. 
- """ + """ if filepath is None: if log: log.msg('Preparing data ...', prepend_timestamp=False) elif log: - log.msg(f'Saving data to `{filepath}`...', prepend_timestamp=False) + log.msg(f'Saving data to `{filepath!s}`...', prepend_timestamp=False) if data is None: if log: - log.warn('Data was empty, no file saved.') + log.warning('Data was empty, no file saved.') return None + assert isinstance(data, pd.DataFrame) + # make sure we do not modify the original data data = data.copy() # convert units and add unit information, if needed if units is not None: - if unit_conversion_factors is None: - raise ValueError( + msg = ( 'When `units` is not None, ' '`unit_conversion_factors` must be provided.' ) + raise ValueError(msg) if log: log.msg('Converting units...', prepend_timestamp=False) # if the orientation is 1, we might not need to scale all columns if orientation == 1: - cols_to_scale_bool = [dt in [float, int] for dt in data.dtypes] + cols_to_scale_bool = [dt in {float, int} for dt in data.dtypes] cols_to_scale = data.columns[cols_to_scale_bool] labels_to_keep = [] for unit_name in units.unique(): - - labels = units.loc[units == unit_name].index.values + labels = units.loc[units == unit_name].index.to_numpy() unit_factor = 1.0 / unit_conversion_factors[unit_name] @@ -186,7 +186,7 @@ def save_to_csv( if orientation == 0: for label in labels: if label in data.columns: - active_labels.append(label) + active_labels.append(label) # noqa: PERF401 if len(active_labels) > 0: data.loc[:, active_labels] *= unit_factor @@ -194,14 +194,12 @@ def save_to_csv( else: # elif orientation == 1: for label in labels: if label in data.index: - active_labels.append(label) + active_labels.append(label) # noqa: PERF401 if len(active_labels) > 0: - # pylint: disable=possibly-used-before-assignment - data.loc[ - np.array(active_labels), np.array(cols_to_scale) - ] *= unit_factor - # pylint: enable=possibly-used-before-assignment + data.loc[np.array(active_labels), np.array(cols_to_scale)] *= 
( + unit_factor + ) labels_to_keep += active_labels @@ -209,14 +207,15 @@ def save_to_csv( if orientation == 0: data = pd.concat([units_df.T, data], axis=0) - data.sort_index(axis=1, inplace=True) + data = data.sort_index(axis=1) else: data = pd.concat([units_df, data], axis=1) - data.sort_index(inplace=True) + data = data.sort_index() if log: log.msg('Unit conversion successful.', prepend_timestamp=False) + assert isinstance(data, pd.DataFrame) if use_simpleindex: # convert MultiIndex to regular index with '-' separators if isinstance(data.index, pd.MultiIndex): @@ -227,21 +226,19 @@ def save_to_csv( data = base.convert_to_SimpleIndex(data, axis=1) if filepath is not None: - - filepath_path = Path(filepath).resolve() - if filepath_path.suffix == '.csv': - + if filepath.suffix == '.csv': # save the contents of the DataFrame into a csv - data.to_csv(filepath_path) + data.to_csv(filepath) if log: log.msg('Data successfully saved to file.', prepend_timestamp=False) else: - raise ValueError( - f'ERROR: Please use the `.csv` file extension. ' - f'Received file name is `{filepath_path}`' + msg = ( + f'Please use the `.csv` file extension. ' + f'Received file name is `{filepath}`' ) + raise ValueError(msg) return None @@ -253,8 +250,7 @@ def substitute_default_path( data_paths: list[str | pd.DataFrame], ) -> list[str | pd.DataFrame]: """ - Substitutes the default directory path in a list of data paths - with a specified path. + Substitute the default directory path with a specified path. This function iterates over a list of data paths and replaces occurrences of the 'PelicunDefault/' substring with the path @@ -266,7 +262,7 @@ def substitute_default_path( Parameters ---------- - data_paths : list of str + data_paths: list of str A list containing the paths to data files. 
These paths may include a placeholder directory 'PelicunDefault/' that needs to be substituted with the actual path specified in @@ -287,8 +283,8 @@ def substitute_default_path( - If a path in the input list does not contain 'PelicunDefault/', it is added to the output list unchanged. - Example - ------- + Examples + -------- >>> data_paths = ['PelicunDefault/data/file1.txt', 'data/file2.txt'] >>> substitute_default_path(data_paths) @@ -309,16 +305,17 @@ def substitute_default_path( return updated_paths -def load_data( +def load_data( # noqa: C901 data_source: str | pd.DataFrame, unit_conversion_factors: dict | None, orientation: int = 0, + *, reindex: bool = True, return_units: bool = False, log: base.Logger | None = None, ) -> tuple[pd.DataFrame, pd.Series] | pd.DataFrame: """ - Loads data assuming it follows standard SimCenter tabular schema. + Load data assuming it follows standard SimCenter tabular schema. The data is assumed to have a single header line and an index column. The second line may start with 'Units' in the index and provide the units for @@ -363,12 +360,8 @@ def load_data( TypeError If `data_source` is neither a string nor a DataFrame, a TypeError is raised. - ValueError - If `unit_conversion_factors` contains keys that do not - correspond to any units in the data, a ValueError may be - raised during processing. 
- """ + """ if isinstance(data_source, pd.DataFrame): # store it at proceed (copying is needed to avoid changing the # original) @@ -377,7 +370,8 @@ def load_data( # otherwise, load the data from a file data = load_from_file(data_source) else: - raise TypeError(f'Invalid data_source type: {type(data_source)}') + msg = f'Invalid data_source type: {type(data_source)}' + raise TypeError(msg) # Define a dictionary to decide the axis based on the orientation axis = {0: 1, 1: 0} @@ -387,7 +381,7 @@ def load_data( # and optionally apply conversions to all numeric values if 'Units' in the_index: units = data['Units'] if orientation == 1 else data.loc['Units'] - data.drop(['Units'], axis=orientation, inplace=True) # type: ignore + data = data.drop(['Units'], axis=orientation) # type: ignore data = base.convert_dtypes(data) if unit_conversion_factors is not None: @@ -414,13 +408,15 @@ def load_data( data.loc[:, numeric_elements] = data.loc[ :, numeric_elements # type: ignore ].multiply( - conversion_factors, axis=axis[orientation] # type: ignore + conversion_factors, + axis=axis[orientation], # type: ignore ) # type: ignore else: data.loc[numeric_elements, :] = data.loc[ numeric_elements, : ].multiply( - conversion_factors, axis=axis[orientation] # type: ignore + conversion_factors, + axis=axis[orientation], # type: ignore ) # type: ignore if log: @@ -431,22 +427,22 @@ def load_data( data = base.convert_dtypes(data) # convert columns or index to MultiIndex if needed - data = base.convert_to_MultiIndex(data, axis=1) # type: ignore - data.sort_index(axis=1, inplace=True) + data = base.convert_to_MultiIndex(data, axis=1) + data = data.sort_index(axis=1) # reindex the data, if needed if reindex: data.index = pd.RangeIndex(start=0, stop=data.shape[0], step=1) else: # convert index to MultiIndex if needed - data = base.convert_to_MultiIndex(data, axis=0) # type: ignore - data.sort_index(inplace=True) + data = base.convert_to_MultiIndex(data, axis=0) + data = data.sort_index() if 
return_units: if units is not None: # convert index in units Series to MultiIndex if needed - units = base.convert_to_MultiIndex(units, axis=0).dropna() # type: ignore # noqa - units.sort_index(inplace=True) + units = base.convert_to_MultiIndex(units, axis=0).dropna() # type: ignore + units = units.sort_index() output = data, units else: output = data # type: ignore @@ -456,7 +452,7 @@ def load_data( def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFrame: """ - Loads data from a file and stores it in a DataFrame. + Load data from a file and stores it in a DataFrame. Currently, only CSV files are supported, but the function is easily extensible to support other file formats. @@ -464,7 +460,9 @@ def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFram Parameters ---------- filepath: string - The location of the source file + The location of the source file. + log: base.Logger, optional + Optional logger object. Returns ------- @@ -481,8 +479,8 @@ def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFram If the filepath is invalid. ValueError If the file is not a CSV. 
- """ + """ if log: log.msg(f'Loading data from {filepath}...') @@ -490,10 +488,11 @@ def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFram filepath_path = Path(filepath).resolve() if not filepath_path.is_file(): - raise FileNotFoundError( - f"The filepath provided does not point to an existing " - f"file: {filepath_path}" + msg = ( + f'The filepath provided does not point to an existing ' + f'file: {filepath_path}' ) + raise FileNotFoundError(msg) if filepath_path.suffix == '.csv': # load the contents of the csv into a DataFrame @@ -510,9 +509,10 @@ def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFram log.msg('File successfully opened.', prepend_timestamp=False) else: - raise ValueError( - f'ERROR: Unexpected file type received when trying ' + msg = ( + f'Unexpected file type received when trying ' f'to load from csv: {filepath_path}' ) + raise ValueError(msg) return data diff --git a/pelicun/model/__init__.py b/pelicun/model/__init__.py index d410a1d17..41aa1fa1f 100644 --- a/pelicun/model/__init__.py +++ b/pelicun/model/__init__.py @@ -33,7 +33,7 @@ # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -"""Pelicun model.""" +"""Pelicun models.""" from __future__ import annotations diff --git a/pelicun/model/asset_model.py b/pelicun/model/asset_model.py index 0e428975d..96a7f33a2 100644 --- a/pelicun/model/asset_model.py +++ b/pelicun/model/asset_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,21 +37,20 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines the AssetModel object and its methods. 
-""" +"""AssetModel object and methods.""" from __future__ import annotations -from typing import TYPE_CHECKING -from typing import Any + from itertools import product +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable + import numpy as np import pandas as pd + +from pelicun import base, file_io, uq from pelicun.model.pelicun_model import PelicunModel -from pelicun import base -from pelicun import uq -from pelicun import file_io if TYPE_CHECKING: from pelicun.assessment import AssessmentBase @@ -61,17 +59,20 @@ class AssetModel(PelicunModel): - """ - Manages asset information used in assessments. + """Asset information used in assessments.""" - Parameters - ---------- + __slots__ = ['_cmp_RVs', 'cmp_marginal_params', 'cmp_sample', 'cmp_units'] - """ + def __init__(self, assessment: AssessmentBase) -> None: + """ + Initialize an Asset model. - __slots__ = ['cmp_marginal_params', 'cmp_units', 'cmp_sample', '_cmp_RVs'] + Parameters + ---------- + assessment: AssessmentBase + Parent assessment object. - def __init__(self, assessment: AssessmentBase): + """ super().__init__(assessment) self.cmp_marginal_params: pd.DataFrame | None = None @@ -81,27 +82,28 @@ def __init__(self, assessment: AssessmentBase): self._cmp_RVs: uq.RandomVariableRegistry | None = None def save_cmp_sample( - self, filepath: str | None = None, save_units: bool = False + self, filepath: str | None = None, *, save_units: bool = False ) -> pd.DataFrame | tuple[pd.DataFrame, pd.Series] | None: """ - Saves the component quantity sample to a CSV file or returns - it as a DataFrame with optional units. + Save or retrieve component quantity sample. - This method handles the storage of a sample of component - quantities, which can either be saved directly to a file or - returned as a DataFrame for further manipulation. When saving - to a file, additional information such as unit conversion - factors and column units can be included. 
If the data is not - being saved to a file, the method can return the DataFrame - with or without units as specified. + Saves the component quantity sample to a CSV file or returns + it as a DataFrame with optional units. This method handles + the storage of a sample of component quantities, which can + either be saved directly to a file or returned as a DataFrame + for further manipulation. When saving to a file, additional + information such as unit conversion factors and column units + can be included. If the data is not being saved to a file, the + method can return the DataFrame with or without units as + specified. Parameters ---------- - filepath : str, optional + filepath: str, optional The path to the file where the component quantity sample should be saved. If not provided, the sample is not saved to disk but returned. - save_units : bool, default: False + save_units: bool, default: False Indicates whether to include a row with unit information in the returned DataFrame. This parameter is ignored if a file path is provided. @@ -116,17 +118,12 @@ def save_cmp_sample( * Optionally, a Series containing the units for each column if `save_units` is True. - Raises - ------ - IOError - Raises an IOError if there is an issue saving the file to - the specified `filepath`. - Notes ----- The function utilizes internal logging to notify the start and completion of the saving process. It adjusts index types and handles unit conversions based on assessment configurations. 
+ """ self.log.div() if filepath is not None: @@ -144,7 +141,7 @@ def save_cmp_sample( res = file_io.save_to_csv( sample, - filepath, + Path(filepath) if filepath is not None else None, units=units, unit_conversion_factors=self._asmnt.unit_conversion_factors, use_simpleindex=(filepath is not None), @@ -160,11 +157,11 @@ def save_cmp_sample( assert isinstance(res, pd.DataFrame) - units_part = res.loc["Units"] + units_part = res.loc['Units'] assert isinstance(units_part, pd.Series) units = units_part - res.drop("Units", inplace=True) + res = res.drop('Units') if save_units: return res.astype(float), units @@ -173,8 +170,7 @@ def save_cmp_sample( def load_cmp_sample(self, filepath: str) -> None: """ - Loads a component quantity sample from a specified CSV file - into the system. + Load a component quantity sample from a specified CSV file. This method reads a CSV file that contains component quantity samples, setting up the necessary DataFrame structures within @@ -184,7 +180,7 @@ def load_cmp_sample(self, filepath: str) -> None: Parameters ---------- - filepath : str + filepath: str The path to the CSV file from which to load the component quantity sample. @@ -210,6 +206,7 @@ def load_cmp_sample(self, filepath: str) -> None: >>> model.load_cmp_sample('path/to/component_sample.csv') # This will load the component quantity sample into the model # from the specified file. + """ self.log.div() self.log.msg('Loading asset components sample...') @@ -225,17 +222,20 @@ def load_cmp_sample(self, filepath: str) -> None: # Check if a `uid` level was passed num_levels = len(sample.columns.names) - if num_levels == 3: + num_levels_without_uid = 3 + num_levels_with_uid = num_levels_without_uid + 1 + if num_levels == num_levels_without_uid: # No `uid`, add one. 
sample.columns.names = ['cmp', 'loc', 'dir'] sample = base.dedupe_index(sample.T).T - elif num_levels == 4: + elif num_levels == num_levels_with_uid: sample.columns.names = ['cmp', 'loc', 'dir', 'uid'] else: - raise ValueError( + msg = ( f'Invalid component sample: Column MultiIndex ' f'has an unexpected length: {num_levels}' ) + raise ValueError(msg) self.cmp_sample = sample @@ -260,8 +260,7 @@ def load_cmp_sample(self, filepath: str) -> None: def load_cmp_model(self, data_source: str | dict[str, pd.DataFrame]) -> None: """ - Loads the model describing component quantities in an asset - from specified data sources. + Load the asset model from a specified data source. This function is responsible for loading data related to the component model of an asset. It supports loading from multiple @@ -272,7 +271,7 @@ def load_cmp_model(self, data_source: str | dict[str, pd.DataFrame]) -> None: Parameters ---------- - data_source : str or dict + data_source: str or dict The source from where to load the component model data. 
If it's a string, it should be the prefix for three files: one for marginal distributions (`_marginals.csv`), @@ -307,14 +306,6 @@ def load_cmp_model(self, data_source: str | dict[str, pd.DataFrame]) -> None: >>> model.load_cmp_model(data_dict) """ - - def get_attribute(attribute_str, dtype=float, default=np.nan): - # pylint: disable=missing-return-doc - # pylint: disable=missing-return-type-doc - if pd.isnull(attribute_str): - return default - return dtype(attribute_str) - self.log.div() self.log.msg('Loading component model...') @@ -364,11 +355,9 @@ def get_attribute(attribute_str, dtype=float, default=np.nan): if col == 'Blocks': cmp_marginal_param.extend( [ - get_attribute( - getattr(row, 'Blocks', np.nan), - dtype=int, - default=1.0, - ) + int(row.Blocks) # type: ignore + if ('Blocks' in dir(row) and not pd.isna(row.Blocks)) + else 1, ] * num_vals ) @@ -378,7 +367,7 @@ def get_attribute(attribute_str, dtype=float, default=np.nan): cmp_marginal_param.extend([getattr(row, col, np.nan)] * num_vals) else: cmp_marginal_param.extend( - [get_attribute(getattr(row, col, np.nan))] * num_vals + [str(getattr(row, col, np.nan))] * num_vals ) index_list.extend(indices) index = pd.MultiIndex.from_tuples(index_list, names=['cmp', 'loc', 'dir']) @@ -403,20 +392,20 @@ def get_attribute(attribute_str, dtype=float, default=np.nan): cmp_marginal_params = pd.concat(cmp_marginal_param_series, axis=1) assert not ( - cmp_marginal_params['Theta_0'].isnull().values.any() # type: ignore + cmp_marginal_params['Theta_0'].isna().to_numpy().any() # type: ignore ) - cmp_marginal_params.dropna(axis=1, how='all', inplace=True) + cmp_marginal_params = cmp_marginal_params.dropna(axis=1, how='all') self.log.msg( - "Model parameters successfully parsed. " - f"{cmp_marginal_params.shape[0]} performance groups identified", + 'Model parameters successfully parsed. 
' + f'{cmp_marginal_params.shape[0]} performance groups identified', prepend_timestamp=False, ) # Now we can take care of converting the values to base units self.log.msg( - "Converting model parameters to internal units...", + 'Converting model parameters to internal units...', prepend_timestamp=False, ) @@ -431,11 +420,11 @@ def get_attribute(attribute_str, dtype=float, default=np.nan): self.cmp_marginal_params = cmp_marginal_params.drop('Units', axis=1) self.log.msg( - "Model parameters successfully loaded.", prepend_timestamp=False + 'Model parameters successfully loaded.', prepend_timestamp=False ) self.log.msg( - "\nComponent model marginal distributions:\n" + str(cmp_marginal_params), + '\nComponent model marginal distributions:\n' + str(cmp_marginal_params), prepend_timestamp=False, ) @@ -443,7 +432,7 @@ def get_attribute(attribute_str, dtype=float, default=np.nan): def list_unique_component_ids(self) -> list[str]: """ - Returns unique component IDs. + Obtain unique component IDs. Returns ------- @@ -452,11 +441,12 @@ def list_unique_component_ids(self) -> list[str]: """ assert self.cmp_marginal_params is not None - cmp_list = self.cmp_marginal_params.index.unique(level=0).to_list() - return cmp_list + return self.cmp_marginal_params.index.unique(level=0).to_list() def generate_cmp_sample(self, sample_size: int | None = None) -> None: """ + Generate a component sample. + Generates a sample of component quantity realizations based on predefined model parameters and optionally specified sample size. If no sample size is provided, the function attempts to @@ -475,25 +465,27 @@ def generate_cmp_sample(self, sample_size: int | None = None) -> None: If the model parameters are not loaded before sample generation, or if neither sample size is specified nor can be determined from the demand model. - """ + """ if self.cmp_marginal_params is None: - raise ValueError( - 'Model parameters have not been specified. 
Load' + msg = ( + 'Model parameters have not been specified. Load ' 'parameters from a file before generating a ' 'sample.' ) + raise ValueError(msg) self.log.div() self.log.msg('Generating sample from component quantity variables...') if sample_size is None: if self._asmnt.demand.sample is None: - raise ValueError( + msg = ( 'Sample size was not specified, ' 'and it cannot be determined from ' 'the demand model.' ) + raise ValueError(msg) sample_size = self._asmnt.demand.sample.shape[0] self._create_cmp_RVs() @@ -505,8 +497,8 @@ def generate_cmp_sample(self, sample_size: int | None = None) -> None: ) cmp_sample = pd.DataFrame(self._cmp_RVs.RV_sample) - cmp_sample.sort_index(axis=0, inplace=True) - cmp_sample.sort_index(axis=1, inplace=True) + cmp_sample = cmp_sample.sort_index(axis=0) + cmp_sample = cmp_sample.sort_index(axis=1) cmp_sample_mi = base.convert_to_MultiIndex(cmp_sample, axis=1)['CMP'] assert isinstance(cmp_sample_mi, pd.DataFrame) cmp_sample = cmp_sample_mi @@ -514,17 +506,14 @@ def generate_cmp_sample(self, sample_size: int | None = None) -> None: self.cmp_sample = cmp_sample self.log.msg( - f"\nSuccessfully generated {sample_size} realizations.", + f'\nSuccessfully generated {sample_size} realizations.', prepend_timestamp=False, ) - def _create_cmp_RVs(self) -> None: - """ - Defines the RVs used for sampling component quantities. 
- """ - + def _create_cmp_RVs(self) -> None: # noqa: N802 + """Define the RVs used for sampling component quantities.""" # initialize the registry - RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) # add a random variable for each component quantity variable assert self.cmp_marginal_params is not None @@ -532,24 +521,24 @@ def _create_cmp_RVs(self) -> None: cmp = rv_params.Index # create a random variable and add it to the registry - family = getattr(rv_params, "Family", 'deterministic') - RV_reg.add_RV( + family = getattr(rv_params, 'Family', 'deterministic') + rv_reg.add_RV( uq.rv_class_map(family)( name=f'CMP-{cmp[0]}-{cmp[1]}-{cmp[2]}-{cmp[3]}', # type: ignore theta=[ # type: ignore - getattr(rv_params, f"Theta_{t_i}", np.nan) + getattr(rv_params, f'Theta_{t_i}', np.nan) for t_i in range(3) ], truncation_limits=[ - getattr(rv_params, f"Truncate{side}", np.nan) - for side in ("Lower", "Upper") + getattr(rv_params, f'Truncate{side}', np.nan) + for side in ('Lower', 'Upper') ], ) ) self.log.msg( - f"\n{self.cmp_marginal_params.shape[0]} random variables created.", + f'\n{self.cmp_marginal_params.shape[0]} random variables created.', prepend_timestamp=False, ) - self._cmp_RVs = RV_reg + self._cmp_RVs = rv_reg diff --git a/pelicun/model/damage_model.py b/pelicun/model/damage_model.py index b3f5ea1b8..589b5ee35 100644 --- a/pelicun/model/damage_model.py +++ b/pelicun/model/damage_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,46 +37,55 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines the DamageModel object and its methods. 
-""" +"""DamageModel object and methods.""" from __future__ import annotations -from typing import TYPE_CHECKING + from functools import partial +from pathlib import Path +from typing import TYPE_CHECKING + import numpy as np import pandas as pd + +from pelicun import base, file_io, uq +from pelicun.model.demand_model import ( + _assemble_required_demand_data, + _get_required_demand_type, + _verify_edps_available, +) from pelicun.model.pelicun_model import PelicunModel -from pelicun.model.demand_model import _get_required_demand_type -from pelicun.model.demand_model import _assemble_required_demand_data -from pelicun.model.demand_model import _verify_edps_available -from pelicun import base -from pelicun import uq -from pelicun import file_io if TYPE_CHECKING: from pelicun.assessment import AssessmentBase + from pelicun.uq import RandomVariableRegistry idx = base.idx class DamageModel(PelicunModel): - """ - Manages damage information used in assessments. - - """ + """Manages damage information used in assessments.""" __slots__ = ['ds_model', 'missing_components'] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: + """ + Initialize a Damage model. + + Parameters + ---------- + assessment: AssessmentBase + The parent assessment object. + + """ super().__init__(assessment) self.ds_model: DamageModel_DS = DamageModel_DS(assessment) self.missing_components: list[str] = [] @property - def _damage_models(self): + def _damage_models(self) -> tuple[DamageModel_DS]: """ Points to the damage model objects included in DamageModel. 
@@ -90,13 +98,10 @@ def _damage_models(self): return (self.ds_model,) def load_damage_model( - self, data_paths: list[str | pd.DataFrame], warn_missing: bool = False + self, data_paths: list[str | pd.DataFrame], *, warn_missing: bool = False ) -> None: - """ - - - """ - self.log.warn( + """.""" + self.log.warning( '`load_damage_model` is deprecated and will be ' 'dropped in future versions of pelicun. ' 'Please use `load_model_parameters` instead, ' @@ -107,12 +112,12 @@ def load_damage_model( 'load_model_parameters(data_paths, cmp_set)`.' ) cmp_set = set(self._asmnt.asset.list_unique_component_ids()) - self.load_model_parameters(data_paths, cmp_set, warn_missing) + self.load_model_parameters(data_paths, cmp_set, warn_missing=warn_missing) @property def sample(self) -> pd.DataFrame: """ - + . Returns ------- @@ -120,7 +125,7 @@ def sample(self) -> pd.DataFrame: The damage sample of the `ds_model`. """ - self.log.warn( + self.log.warning( '`{damage model}.sample` is deprecated and will be ' 'dropped in future versions of pelicun. ' 'Please use `{damage model}.ds_model.sample` instead. ' @@ -133,6 +138,7 @@ def load_model_parameters( self, data_paths: list[str | pd.DataFrame], cmp_set: set[str], + *, warn_missing: bool = False, ) -> None: """ @@ -152,7 +158,7 @@ def load_model_parameters( Damage parameters in the input files for components outside of that set are omitted for performance. warn_missing: bool - Wether to check if there are components in the asset model + Whether to check if there are components in the asset model that do not have specified damage parameters. Should be set to True if all components in the asset model are damage state-driven, or if only a damage estimation is @@ -165,15 +171,14 @@ def load_model_parameters( specified paths. 
""" - self.log.div() self.log.msg('Loading damage model...', prepend_timestamp=False) # for i, path in enumerate(data_paths): if 'fragility_DB' in path: - path = path.replace('fragility_DB', 'damage_DB') - self.log.warn( + path = path.replace('fragility_DB', 'damage_DB') # noqa: PLW2901 + self.log.warning( '`fragility_DB` is deprecated and will ' 'be dropped in future versions of pelicun. ' 'Please use `damage_DB` instead.' @@ -195,9 +200,10 @@ def load_model_parameters( # states assert isinstance(data, pd.DataFrame) if _is_for_ds_model(data): - self.ds_model._load_model_parameters(data) + self.ds_model.load_model_parameters(data) else: - raise ValueError(f'Invalid damage model parameters: {data_path}') + msg = f'Invalid damage model parameters: {data_path}' + raise ValueError(msg) self.log.msg( 'Damage model parameters loaded successfully.', prepend_timestamp=False @@ -213,9 +219,9 @@ def load_model_parameters( for damage_model in self._damage_models: # drop unused damage parameter definitions - damage_model._drop_unused_damage_parameters(cmp_set) + damage_model.drop_unused_damage_parameters(cmp_set) # remove components with incomplete damage parameters - damage_model._remove_incomplete_components() + damage_model.remove_incomplete_components() # # convert units @@ -225,7 +231,7 @@ def load_model_parameters( 'Converting damage model parameter units.', prepend_timestamp=False ) for damage_model in self._damage_models: - damage_model._convert_damage_parameter_units() + damage_model.convert_damage_parameter_units() # # verify damage parameter availability @@ -237,7 +243,7 @@ def load_model_parameters( prepend_timestamp=False, ) missing_components = self._ensure_damage_parameter_availability( - cmp_set, warn_missing + cmp_set, warn_missing=warn_missing ) self.missing_components = missing_components @@ -268,7 +274,6 @@ def calculate( subtraction, '*' for multiplication, and '/' for division. 
""" - self.log.div() self.log.msg('Calculating damages...') @@ -300,7 +305,7 @@ def calculate( # obtain damage states for applicable components assert self._asmnt.demand.sample is not None - self.ds_model._obtain_ds_sample( + self.ds_model.obtain_ds_sample( demand_sample=self._asmnt.demand.sample, component_blocks=component_blocks, block_batch_size=block_batch_size, @@ -311,20 +316,20 @@ def calculate( # Apply the prescribed damage process, if any if dmg_process is not None: - self.log.msg("Applying damage processes.") + self.log.msg('Applying damage processes.') # Sort the damage processes tasks dmg_process = {key: dmg_process[key] for key in sorted(dmg_process)} # Perform damage tasks in the sorted order for task in dmg_process.items(): - self.ds_model._perform_dmg_task(task) + self.ds_model.perform_dmg_task(task) self.log.msg( - "Damage processes successfully applied.", prepend_timestamp=False + 'Damage processes successfully applied.', prepend_timestamp=False ) - qnt_sample = self.ds_model._prepare_dmg_quantities( + qnt_sample = self.ds_model.prepare_dmg_quantities( self._asmnt.asset.cmp_sample, self._asmnt.asset.cmp_marginal_params, dropzero=False, @@ -332,16 +337,18 @@ def calculate( # If requested, extend the quantity table with all possible DSs if self._asmnt.options.list_all_ds: - qnt_sample = self.ds_model._complete_ds_cols(qnt_sample) + qnt_sample = self.ds_model.complete_ds_cols(qnt_sample) self.ds_model.sample = qnt_sample self.log.msg('Damage calculation completed.', prepend_timestamp=False) def save_sample( - self, filepath: str | None = None, save_units: bool = False + self, filepath: str | None = None, *, save_units: bool = False ) -> pd.DataFrame | tuple[pd.DataFrame, pd.Series] | None: """ + Save or return the damage sample data. + Saves the damage sample data to a CSV file or returns it directly with an option to include units. 
@@ -353,11 +360,11 @@ def save_sample( Parameters ---------- - filepath : str, optional + filepath: str, optional The path to the file where the damage sample should be saved. If not provided, the sample is not saved to disk but returned. - save_units : bool, default: False + save_units: bool, default: False Indicates whether to include a row with unit information in the returned DataFrame. This parameter is ignored if a file path is provided. @@ -371,6 +378,7 @@ def save_sample( - DataFrame containing the damage sample. - Optionally, a Series containing the units for each column if `save_units` is True. + """ self.log.div() self.log.msg('Saving damage sample...') @@ -388,7 +396,7 @@ def save_sample( res = file_io.save_to_csv( self.ds_model.sample, - filepath, + Path(filepath) if filepath is not None else None, units=qnt_units, unit_conversion_factors=self._asmnt.unit_conversion_factors, use_simpleindex=(filepath is not None), @@ -403,9 +411,9 @@ def save_sample( # else: assert isinstance(res, pd.DataFrame) - units = res.loc["Units"] + units = res.loc['Units'] assert isinstance(units, pd.Series) - res.drop("Units", inplace=True) + res = res.drop('Units') res.index = res.index.astype('int64') res = res.astype(float) assert isinstance(res, pd.DataFrame) @@ -416,10 +424,7 @@ def save_sample( return res def load_sample(self, filepath: str) -> None: - """ - Load damage state sample data. - - """ + """Load damage state sample data.""" self.log.div() self.log.msg('Loading damage sample...') @@ -435,17 +440,17 @@ def load_sample(self, filepath: str) -> None: self.log.msg('Damage sample successfully loaded.', prepend_timestamp=False) def _ensure_damage_parameter_availability( - self, cmp_set: set[str], warn_missing: bool + self, cmp_set: set[str], *, warn_missing: bool ) -> list[str]: """ - Makes sure that all components have damage parameters. + Make sure that all components have damage parameters. 
Parameters ---------- cmp_set: list List of component IDs in the asset model. warn_missing: bool - Wether to issue a warning if missing components are found. + Whether to issue a warning if missing components are found. Returns ------- @@ -453,7 +458,6 @@ def _ensure_damage_parameter_availability( List of component IDs with missing damage parameters. """ - available_components = self._get_component_id_set() missing_components = [ @@ -463,18 +467,23 @@ def _ensure_damage_parameter_availability( ] if missing_components and warn_missing: - self.log.warn( - f"The damage model does not provide " - f"damage information for the following component(s) " - f"in the asset model: {missing_components}." + self.log.warning( + f'The damage model does not provide ' + f'damage information for the following component(s) ' + f'in the asset model: {missing_components}.' ) return missing_components def _get_component_id_set(self) -> set[str]: """ - Get a set of components for which damage parameters are - available. + Get a set of components with available damage parameters. + + Returns + ------- + set + Set of components with available damage parameters. + """ cmp_list = [] if self.ds_model.damage_params is not None: @@ -483,22 +492,30 @@ def _get_component_id_set(self) -> set[str]: class DamageModel_Base(PelicunModel): - """ - Base class for damage models - - """ + """Base class for damage models.""" __slots__ = ['damage_params', 'sample'] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: + """ + Initialize the object. + + Parameters + ---------- + assessment: AssessmentBase + Parent assessment object. 
+ + """ super().__init__(assessment) self.damage_params: pd.DataFrame | None = None self.sample: pd.DataFrame | None = None - def _load_model_parameters(self, data: pd.DataFrame) -> None: + def load_model_parameters(self, data: pd.DataFrame) -> None: """ - Load model parameters from a DataFrame, extending those + Load model parameters from a DataFrame. + + Loads model parameters from a DataFrame, extending those already available. Parameters already defined take precedence, i.e. redefinitions of parameters are ignored. @@ -508,37 +525,35 @@ def _load_model_parameters(self, data: pd.DataFrame) -> None: Data with damage model information. """ - if self.damage_params is not None: data = pd.concat((self.damage_params, data), axis=0) # drop redefinitions of components data = data.groupby(data.index).first() - # TODO: load defaults for Demand-Offset and Demand-Directional + # TODO(AZ): load defaults for Demand-Offset and Demand-Directional self.damage_params = data - def _convert_damage_parameter_units(self) -> None: - """ - Converts previously loaded damage parameters to base units. 
-
-        """
+    def convert_damage_parameter_units(self) -> None:
+        """Convert previously loaded damage parameters to base units."""
         if self.damage_params is None:
             return
 
-        units = self.damage_params[('Demand', 'Unit')]
-        self.damage_params.drop(('Demand', 'Unit'), axis=1, inplace=True)
-        for LS_i in self.damage_params.columns.unique(level=0):
-            if LS_i.startswith('LS'):
-                params = self.damage_params.loc[:, LS_i].copy()
+        units = self.damage_params['Demand', 'Unit']
+        self.damage_params = self.damage_params.drop(('Demand', 'Unit'), axis=1)
+        for ls_i in self.damage_params.columns.unique(level=0):
+            if ls_i.startswith('LS'):
+                params = self.damage_params.loc[:, ls_i].copy()
                 assert isinstance(params, pd.DataFrame)
-                self.damage_params.loc[:, LS_i] = self._convert_marginal_params(
+                self.damage_params.loc[:, ls_i] = self._convert_marginal_params(
                     params, units
-                ).values
+                ).to_numpy()
 
-    def _remove_incomplete_components(self) -> None:
+    def remove_incomplete_components(self) -> None:
         """
+        Remove components with incomplete damage parameter info.
+
         Removes components that have incomplete damage model
         definitions from the damage model parameters.
 
@@ -550,13 +565,15 @@ def _remove_incomplete_components(self) -> None:
             return
 
         cmp_incomplete_idx = self.damage_params.loc[
-            self.damage_params[('Incomplete', '')] == 1
+            self.damage_params['Incomplete', ''] == 1
         ].index
-        self.damage_params.drop(cmp_incomplete_idx, inplace=True)
+        self.damage_params = self.damage_params.drop(cmp_incomplete_idx)
 
-    def _drop_unused_damage_parameters(self, cmp_set: set[str]) -> None:
+    def drop_unused_damage_parameters(self, cmp_set: set[str]) -> None:
         """
+        Remove info for nonexistent components.
+
         Removes damage parameter definitions for component IDs not
         present in the given list.
 
         Parameters
         ----------
         cmp_set: set
             Set of component IDs to be preserved in the damage
             parameters.
+ """ if self.damage_params is None: return @@ -578,8 +596,7 @@ def _get_pg_batches( missing_components: list[str], ) -> pd.DataFrame: """ - Group performance groups into batches for efficient damage - assessment. + Group performance groups into batches for efficiency. The method takes as input the block_batch_size, which specifies the maximum number of blocks per batch. The method @@ -605,9 +622,8 @@ def _get_pg_batches( Parameters ---------- - component_blocks: pd.DataFrame - DataFrame containing a singe column, `Blocks`, which lists + DataFrame containing a single column, `Blocks`, which lists the number of blocks for each (`cmp`-`loc`-`dir`-`uid`). block_batch_size: int Maximum number of components in each batch. @@ -627,11 +643,10 @@ def _get_pg_batches( block batch size. """ - # A warning has already been issued for components with # missing damage parameters (in # `DamageModel._ensure_damage_parameter_availability`). - component_blocks.drop(pd.Index(missing_components), inplace=True) + component_blocks = component_blocks.drop(pd.Index(missing_components)) # It is safe to simply disregard components that are not # present in the `damage_params` of *this* model, and let them @@ -646,11 +661,11 @@ def _get_pg_batches( component_blocks = component_blocks.groupby( ['loc', 'dir', 'cmp', 'uid'] ).sum() - component_blocks.sort_index(axis=0, inplace=True) + component_blocks = component_blocks.sort_index(axis=0) # Calculate cumulative sum of blocks component_blocks['CBlocks'] = np.cumsum( - component_blocks['Blocks'].values.astype(int) + component_blocks['Blocks'].to_numpy().astype(int) ) component_blocks['Batch'] = 0 @@ -669,7 +684,7 @@ def _get_pg_batches( ) if np.sum(batch_mask) < 1: - batch_mask = np.full(batch_mask.shape, False) + batch_mask = np.full(batch_mask.shape, fill_value=False) batch_mask[np.where(component_blocks['CBlocks'] > 0)[0][0]] = True component_blocks.loc[batch_mask, 'Batch'] = batch_i @@ -694,28 +709,32 @@ def _get_pg_batches( .loc[:, 'Blocks'] 
.to_frame() ) - component_blocks = component_blocks.sort_index( + return component_blocks.sort_index( level=['Batch', 'cmp', 'loc', 'dir', 'uid'] ) - return component_blocks class DamageModel_DS(DamageModel_Base): - """ - Damage model for components that have discrete Damage States (DS). - - """ + """Damage model for components that have discrete Damage States (DS).""" __slots__ = ['ds_sample'] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: + """ + Initialize the object. + + Parameters + ---------- + assessment: AssessmentBase + Parent assessment object. + + """ super().__init__(assessment) self.ds_sample: pd.DataFrame | None = None def probabilities(self) -> pd.DataFrame: """ - Returns the probability of each observed damage state in the - sample. + Return the probability of each observed damage state. Returns ------- @@ -749,7 +768,7 @@ def probabilities(self) -> pd.DataFrame: .sort_index(axis=0) ) - def _obtain_ds_sample( + def obtain_ds_sample( self, demand_sample: pd.DataFrame, component_blocks: pd.DataFrame, @@ -758,11 +777,7 @@ def _obtain_ds_sample( missing_components: list[str], nondirectional_multipliers: dict[str, float], ) -> None: - """ - Obtain the damage state of each performance group in the - model. - """ - + """Obtain the damage state of each performance group.""" # Break up damage calculation and perform it by performance group. # Compared to the simultaneous calculation of all PGs, this approach # reduces demands on memory and increases the load on CPU. 
This leads @@ -783,19 +798,18 @@ def _obtain_ds_sample( ) self.log.msg( - f"{len(batches)} batches of Performance Groups prepared " - "for damage assessment", + f'{len(batches)} batches of Performance Groups prepared ' + 'for damage assessment', prepend_timestamp=False, ) # for PG_i in self._asmnt.asset.cmp_sample.columns: ds_samples = [] - for PGB_i in batches: - - performance_group = component_blocks.loc[PGB_i] + for pgb_i in batches: + performance_group = component_blocks.loc[pgb_i] self.log.msg( - f"Calculating damage states for PG batch {PGB_i} with " + f"Calculating damage states for PG batch {pgb_i} with " f"{int(performance_group['Blocks'].sum())} blocks" ) @@ -849,12 +863,14 @@ def _obtain_ds_sample( self.ds_sample = pd.concat(ds_samples, axis=1) - self.log.msg("Damage state calculation successful.", prepend_timestamp=False) + self.log.msg('Damage state calculation successful.', prepend_timestamp=False) - def _handle_operation( + def _handle_operation( # noqa: PLR6301 self, initial_value: float, operation: str, other_value: float ) -> float: """ + Handle a capacity adjustment operation. + This method is used in `_create_dmg_RVs` to apply capacity adjustment operations whenever required. It is defined as a safer alternative to directly using `eval`. @@ -887,26 +903,29 @@ def _handle_operation( return initial_value * other_value if operation == '/': return initial_value / other_value - raise ValueError(f'Invalid operation: `{operation}`') + msg = f'Invalid operation: `{operation}`' + raise ValueError(msg) def _generate_dmg_sample( self, sample_size: int, - PGB: pd.DataFrame, + pgb: pd.DataFrame, scaling_specification: dict | None = None, ) -> tuple[pd.DataFrame, pd.DataFrame]: """ - This method generates a damage sample by creating random - variables (RVs) for capacities and limit-state-damage-states - (lsds), and then sampling from these RVs. The sample size and - performance group batches (PGB) are specified as inputs. 
The - method returns the capacity sample and the lsds sample. + Generate the damage sample. + + Generates a damage sample by creating random variables (RVs) + for capacities and limit-state-damage-states (lsds), and then + sampling from these RVs. The sample size and performance group + batches (PGB) are specified as inputs. The method returns the + capacity sample and the lsds sample. Parameters ---------- - sample_size : int + sample_size: int The number of realizations to generate. - PGB : DataFrame + pgb: DataFrame A DataFrame that groups performance groups into batches for efficient damage assessment. scaling_specification: dict, optional @@ -920,9 +939,9 @@ def _generate_dmg_sample( Returns ------- - capacity_sample : DataFrame + capacity_sample: DataFrame A DataFrame that represents the capacity sample. - lsds_sample : DataFrame + lsds_sample: DataFrame A DataFrame that represents the . Raises @@ -933,36 +952,37 @@ def _generate_dmg_sample( """ # Check if damage model parameters have been specified if self.damage_params is None: - raise ValueError( + msg = ( 'Damage model parameters have not been specified. ' 'Load parameters from the default damage model ' 'databases or provide your own damage model ' 'definitions before generating a sample.' 
) + raise ValueError(msg) # Create capacity and LSD RVs for each performance group - capacity_RVs, lsds_RVs = self._create_dmg_RVs(PGB, scaling_specification) + capacity_rvs, lsds_rvs = self._create_dmg_RVs(pgb, scaling_specification) if self._asmnt.log.verbose: self.log.msg('Sampling capacities...', prepend_timestamp=True) # Generate samples for capacity RVs assert self._asmnt.options.sampling_method is not None - capacity_RVs.generate_sample( + capacity_rvs.generate_sample( sample_size=sample_size, method=self._asmnt.options.sampling_method ) # Generate samples for LSD RVs - lsds_RVs.generate_sample( + lsds_rvs.generate_sample( sample_size=sample_size, method=self._asmnt.options.sampling_method ) if self._asmnt.log.verbose: - self.log.msg("Raw samples are available", prepend_timestamp=True) + self.log.msg('Raw samples are available', prepend_timestamp=True) # get the capacity and lsds samples capacity_sample = ( - pd.DataFrame(capacity_RVs.RV_sample) + pd.DataFrame(capacity_rvs.RV_sample) .sort_index(axis=0) .sort_index(axis=1) ) @@ -974,7 +994,7 @@ def _generate_dmg_sample( capacity_sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls'] lsds_sample = ( - pd.DataFrame(lsds_RVs.RV_sample) + pd.DataFrame(lsds_rvs.RV_sample) .sort_index(axis=0) .sort_index(axis=1) .astype(int) @@ -986,7 +1006,7 @@ def _generate_dmg_sample( if self._asmnt.log.verbose: self.log.msg( - f"Successfully generated {sample_size} realizations.", + f'Successfully generated {sample_size} realizations.', prepend_timestamp=True, ) @@ -1000,7 +1020,7 @@ def _evaluate_damage_state( lsds_sample: pd.DataFrame, ) -> pd.DataFrame: """ - Use the demand and LS capacity sample to evaluate damage states + Use the demand and LS capacity sample to evaluate damage states. Parameters ---------- @@ -1020,8 +1040,8 @@ def _evaluate_damage_state( DataFrame Assigns a Damage State to each component block in the asset model. 
- """ + """ if self._asmnt.log.verbose: self.log.msg('Evaluating damage states...', prepend_timestamp=True) @@ -1037,28 +1057,28 @@ def _evaluate_damage_state( # For each demand type in the demand dictionary for demand_name, demand_vals in demand_dict.items(): # Get the list of PGs assigned to this demand type - PG_list = required_edps[demand_name] + pg_list = required_edps[demand_name] # Create a list of columns for the demand data # corresponding to each PG in the PG_list - PG_cols = pd.concat( - [dmg_eval.loc[:1, PG_i] for PG_i in PG_list], # type: ignore + pg_cols = pd.concat( + [dmg_eval.loc[:1, PG_i] for PG_i in pg_list], # type: ignore axis=1, - keys=PG_list, + keys=pg_list, ).columns - PG_cols.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls'] + pg_cols.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls'] # Create a DataFrame with demand values repeated for the # number of PGs and assign the columns as PG_cols demand_df.append( pd.concat( - [pd.Series(demand_vals)] * len(PG_cols), axis=1, keys=PG_cols + [pd.Series(demand_vals)] * len(pg_cols), axis=1, keys=pg_cols ) ) # Concatenate all demand DataFrames into a single DataFrame demand_df_concat = pd.concat(demand_df, axis=1) # Sort the columns of the demand DataFrame - demand_df_concat.sort_index(axis=1, inplace=True) + demand_df_concat = demand_df_concat.sort_index(axis=1) # Evaluate the damage exceedance by subtracting demand from # capacity and checking if the result is less than zero @@ -1066,7 +1086,7 @@ def _evaluate_damage_state( # Remove any columns with NaN values from the damage # exceedance DataFrame - dmg_eval.dropna(axis=1, inplace=True) + dmg_eval = dmg_eval.dropna(axis=1) # initialize the DataFrames that store the damage states and # quantities @@ -1081,11 +1101,11 @@ def _evaluate_damage_state( ls_list = dmg_eval.columns.get_level_values(5).unique() # for each consecutive limit state... 
- for LS_id in ls_list: + for ls_id in ls_list: # get all cmp - loc - dir - block where this limit state occurs dmg_e_ls = dmg_eval.loc[ :, # type: ignore - idx[:, :, :, :, :, LS_id], + idx[:, :, :, :, :, ls_id], ].dropna(axis=1) # Get the damage states corresponding to this limit state in each @@ -1115,11 +1135,11 @@ def _evaluate_damage_state( return ds_sample - def _create_dmg_RVs( - self, PGB: pd.DataFrame, scaling_specification: dict | None = None + def _create_dmg_RVs( # noqa: N802, C901 + self, pgb: pd.DataFrame, scaling_specification: dict | None = None ) -> tuple[uq.RandomVariableRegistry, uq.RandomVariableRegistry]: """ - Creates random variables required later for the damage calculation. + Create random variables for the damage calculation. The method initializes two random variable registries, capacity_RV_reg and lsds_RV_reg, and loops through each @@ -1136,7 +1156,7 @@ def _create_dmg_RVs( Parameters ---------- - PGB : DataFrame + pgb: DataFrame A DataFrame that groups performance groups into batches for efficient damage assessment. scaling_specification: dict, optional @@ -1160,13 +1180,21 @@ def _create_dmg_RVs( ValueError Raises an error if the scaling specification is invalid or if the input DataFrame does not meet the expected format. - Also, raises errors if there are any issues with the types - or formats of the data in the input DataFrame. + TypeError + If there are any issues with the types of the data in the + input DataFrame. """ - def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag): + def assign_lsds( + ds_weights: str | None, + ds_id: int, + lsds_rv_reg: RandomVariableRegistry, + lsds_rv_tag: str, + ) -> int: """ + Assign limit states to damage states. + Assigns limit states to damage states using random variables, updating the provided random variable registry. 
This function either creates a deterministic random @@ -1175,19 +1203,19 @@ def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag): Parameters ---------- - ds_weights : str or None + ds_weights: str or None A string representing the weights of different damage states associated with a limit state, separated by '|'. If None, indicates that there is only one damage state associated with the limit state. - ds_id : int + ds_id: int The starting index for damage state IDs. This ID helps in mapping damage states to limit states. - lsds_RV_reg : RandomVariableRegistry + lsds_rv_reg: RandomVariableRegistry The registry where the newly created random variables (for mapping limit states to damage states) will be added. - lsds_rv_tag : str + lsds_rv_tag: str A unique identifier for the random variable being created, typically including identifiers for component, location, direction, and limit state. @@ -1206,39 +1234,42 @@ def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag): probabilistic damage assessments. It dynamically adjusts to the number of damage states specified and applies a mapping function to correctly assign state IDs. + """ # If the limit state has a single damage state assigned # to it, we don't need random sampling - if pd.isnull(ds_weights): + if pd.isna(ds_weights): ds_id += 1 - - lsds_RV_reg.add_RV( + lsds_rv_reg.add_RV( uq.DeterministicRandomVariable( name=lsds_rv_tag, - theta=ds_id, + theta=np.array((ds_id,)), ) ) # Otherwise, we create a multinomial random variable else: + assert isinstance(ds_weights, str) # parse the DS weights - ds_weights = np.array( - ds_weights.replace(" ", "").split('|'), dtype=float + ds_weights_np = np.array( + ds_weights.replace(' ', '').split('|'), dtype=float ) - def map_ds(values, offset): + def map_ds(values: np.ndarray, offset: int) -> np.ndarray: """ + Map DS indices to damage states. + Maps an array of damage state indices to their corresponding actual state IDs by applying an offset. 
Parameters ---------- - values : array-like + values: array-like An array of indices representing damage states. These indices are typically sequential integers starting from zero. - offset : int + offset: int The value to be added to each element in `values` to obtain the actual damage state IDs. @@ -1249,18 +1280,19 @@ def map_ds(values, offset): An array where each original index from `values` has been incremented by `offset` to reflect its actual damage state ID. + """ return values + offset - lsds_RV_reg.add_RV( + lsds_rv_reg.add_RV( uq.MultinomialRandomVariable( name=lsds_rv_tag, - theta=ds_weights, + theta=ds_weights_np, f_map=partial(map_ds, offset=ds_id + 1), ) ) - ds_id += len(ds_weights) + ds_id += len(ds_weights_np) return ds_id @@ -1268,8 +1300,8 @@ def map_ds(values, offset): self.log.msg('Generating capacity variables ...', prepend_timestamp=True) # initialize the registry - capacity_RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) - lsds_RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + capacity_rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + lsds_rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) # capacity adjustment: # ensure the scaling_specification is a dictionary @@ -1283,20 +1315,23 @@ def map_ds(values, offset): for key, value in scaling_specification.items(): css = 'capacity adjustment specification' if not isinstance(value, str): - raise ValueError( + msg = ( f'Invalid entry in {css}: {value}. It has to be a string. ' f'See docstring of DamageModel._create_dmg_RVs.' 
) + raise TypeError(msg) capacity_adjustment_operation = value[0] number = value[1::] - if capacity_adjustment_operation not in ('+', '-', '*', '/'): - raise ValueError( + if capacity_adjustment_operation not in {'+', '-', '*', '/'}: + msg = ( f'Invalid operation in {css}: ' f'{capacity_adjustment_operation}' ) + raise ValueError(msg) fnumber = base.float_or_None(number) if fnumber is None: - raise ValueError(f'Invalid number in {css}: {number}') + msg = f'Invalid number in {css}: {number}' + raise ValueError(msg) parsed_scaling_specification[key] = ( capacity_adjustment_operation, fnumber, @@ -1304,17 +1339,15 @@ def map_ds(values, offset): scaling_specification = parsed_scaling_specification # get the component sample and blocks from the asset model - for PG in PGB.index: + for pg in pgb.index: # determine demand capacity adjustment operation, if required - cmp_loc_dir = '-'.join(PG[0:3]) - capacity_adjustment_operation = ( - scaling_specification.get( # type: ignore - cmp_loc_dir, - ) + cmp_loc_dir = '-'.join(pg[0:3]) + capacity_adjustment_operation = scaling_specification.get( # type: ignore + cmp_loc_dir, ) - cmp_id = PG[0] - blocks = PGB.loc[PG, 'Blocks'] + cmp_id = pg[0] + blocks = pgb.loc[pg, 'Blocks'] # Calculate the block weights blocks = np.full(int(blocks), 1.0 / blocks) @@ -1329,26 +1362,26 @@ def map_ds(values, offset): for val in frg_params.index.get_level_values(0).unique(): if 'LS' in val: - limit_states.append(val[2:]) + limit_states.append(val[2:]) # noqa: PERF401 ds_id = 0 frg_rv_set_tags: list = [[] for b in blocks] - anchor_RVs: list = [] + anchor_rvs: list = [] for ls_id in limit_states: - frg_params_LS = frg_params[f'LS{ls_id}'] + frg_params_ls = frg_params[f'LS{ls_id}'] - theta_0 = frg_params_LS.get('Theta_0', np.nan) - family = frg_params_LS.get('Family', 'deterministic') - ds_weights = frg_params_LS.get('DamageStateWeights', np.nan) + theta_0 = frg_params_ls.get('Theta_0', np.nan) + family = frg_params_ls.get('Family', 'deterministic') + 
ds_weights = frg_params_ls.get('DamageStateWeights', None) # check if the limit state is defined for the component if pd.isna(theta_0): continue theta = [ - frg_params_LS.get(f"Theta_{t_i}", np.nan) for t_i in range(3) + frg_params_ls.get(f'Theta_{t_i}', np.nan) for t_i in range(3) ] if capacity_adjustment_operation: @@ -1359,24 +1392,24 @@ def map_ds(values, offset): float(capacity_adjustment_operation[1]), ) else: - self.log.warn( + self.log.warning( f'Capacity adjustment is only supported ' f'for `normal` or `lognormal` distributions. ' f'Ignoring: `{cmp_loc_dir}`, which is `{family}`' ) tr_lims = [ - frg_params_LS.get(f"Truncate{side}", np.nan) - for side in ("Lower", "Upper") + frg_params_ls.get(f'Truncate{side}', np.nan) + for side in ('Lower', 'Upper') ] for block_i, _ in enumerate(blocks): frg_rv_tag = ( 'FRG-' - f'{PG[0]}-' # cmp_id - f'{PG[1]}-' # loc - f'{PG[2]}-' # dir - f'{PG[3]}-' # uid + f'{pg[0]}-' # cmp_id + f'{pg[1]}-' # loc + f'{pg[2]}-' # dir + f'{pg[3]}-' # uid f'{block_i + 1}-' # block f'{ls_id}' ) @@ -1397,7 +1430,7 @@ def map_ds(values, offset): if ls_id == limit_states[0]: anchor = None else: - anchor = anchor_RVs[block_i] + anchor = anchor_rvs[block_i] # parse theta values for multilinear_CDF if family == 'multilinear_CDF': @@ -1414,55 +1447,55 @@ def map_ds(values, offset): ) ) - RV = uq.rv_class_map(family)( # type: ignore + rv = uq.rv_class_map(family)( # type: ignore name=frg_rv_tag, theta=theta, truncation_limits=tr_lims, anchor=anchor, ) - capacity_RV_reg.add_RV(RV) # type: ignore + capacity_rv_reg.add_RV(rv) # type: ignore # add the RV to the set of correlated variables frg_rv_set_tags[block_i].append(frg_rv_tag) if ls_id == limit_states[0]: - anchor_RVs.append(RV) + anchor_rvs.append(rv) # Now add the LS->DS assignments lsds_rv_tag = ( 'LSDS-' - f'{PG[0]}-' # cmp_id - f'{PG[1]}-' # loc - f'{PG[2]}-' # dir - f'{PG[3]}-' # uid + f'{pg[0]}-' # cmp_id + f'{pg[1]}-' # loc + f'{pg[2]}-' # dir + f'{pg[3]}-' # uid f'{block_i + 1}-' # block 
f'{ls_id}' ) ds_id_next = assign_lsds( - ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag + ds_weights, ds_id, lsds_rv_reg, lsds_rv_tag ) ds_id = ds_id_next if self._asmnt.log.verbose: - rv_count = len(lsds_RV_reg.RV) + rv_count = len(lsds_rv_reg.RV) self.log.msg( - f"2x{rv_count} random variables created.", prepend_timestamp=False + f'2x{rv_count} random variables created.', prepend_timestamp=False ) - return capacity_RV_reg, lsds_RV_reg + return capacity_rv_reg, lsds_rv_reg - def _prepare_dmg_quantities( + def prepare_dmg_quantities( self, component_sample: pd.DataFrame, - component_marginal_parameters: pd.DataFrame, + component_marginal_parameters: pd.DataFrame | None, + *, dropzero: bool = True, ) -> pd.DataFrame: """ - Combine component quantity and damage state information in one - DataFrame. + Combine component quantity and damage state information. This method assumes that a component quantity sample is available in the asset model and a damage state sample is @@ -1470,7 +1503,7 @@ def _prepare_dmg_quantities( Parameters ---------- - component_quantities: pd.DataFrame + component_sample: pd.DataFrame Component quantity sample from the AssetModel. component_marginal_parameters: pd.DataFrame Component marginal parameters from the AssetModel. @@ -1485,11 +1518,9 @@ def _prepare_dmg_quantities( damage state information. 
""" - # ('cmp', 'loc', 'dir', 'uid') -> component quantity series component_quantities = component_sample.to_dict('series') - # pylint: disable=missing-return-doc if self._asmnt.log.verbose: self.log.msg('Calculating damage quantities...', prepend_timestamp=True) @@ -1504,15 +1535,13 @@ def _prepare_dmg_quantities( # ('cmp', 'loc', 'dir', 'uid) -> number of blocks num_blocks = component_marginal_parameters['Blocks'].to_dict() - def get_num_blocks(key): - # pylint: disable=missing-return-type-doc + def get_num_blocks(key: object) -> float: return float(num_blocks[key]) else: # otherwise assume 1 block regardless of # ('cmp', 'loc', 'dir', 'uid) key - def get_num_blocks(_): - # pylint: disable=missing-return-type-doc + def get_num_blocks(key: object) -> float: # noqa: ARG001 return 1.00 # ('cmp', 'loc', 'dir', 'uid', 'block') -> damage state series @@ -1534,18 +1563,20 @@ def get_num_blocks(_): if dropzero and ds == 0: continue dmg_qnt_vals = np.where( - damage_state_series.values == ds, - component_quantities[component, location, direction, uid].values + damage_state_series.to_numpy() == ds, + component_quantities[ + component, location, direction, uid + ].to_numpy() / get_num_blocks((component, location, direction, uid)), 0.00, ) if -1 in damage_state_set: dmg_qnt_vals = np.where( - damage_state_series.values != -1, dmg_qnt_vals, np.nan + damage_state_series.to_numpy() != -1, dmg_qnt_vals, np.nan ) dmg_qnt_series = pd.Series(dmg_qnt_vals) dmg_qnt_series_collection[ - (component, location, direction, uid, block, str(ds)) + component, location, direction, uid, block, str(ds) ] = dmg_qnt_series damage_quantities = pd.concat( @@ -1558,13 +1589,11 @@ def get_num_blocks(_): # min_count=1 is specified so that the sum cross all NaNs will # result in NaN instead of zero. 
# https://stackoverflow.com/questions/33448003/sum-across-all-nans-in-pandas-returns-zero - damage_quantities = damage_quantities.groupby( # type: ignore + return damage_quantities.groupby( # type: ignore level=['cmp', 'loc', 'dir', 'uid', 'ds'], axis=1 ).sum(min_count=1) - return damage_quantities - - def _perform_dmg_task(self, task: tuple) -> None: + def perform_dmg_task(self, task: tuple) -> None: # noqa: C901 """ Perform a task from a damage process. @@ -1578,7 +1607,7 @@ def _perform_dmg_task(self, task: tuple) -> None: Parameters ---------- - task : list + task: list A list representing a task from the damage process. The list contains two elements: - The first element is a string representing the source @@ -1603,8 +1632,8 @@ def _perform_dmg_task(self, task: tuple) -> None: ValueError Raises an error if the source or target event descriptions do not follow expected formats. - """ + """ if self._asmnt.log.verbose: self.log.msg(f'Applying task {task}...', prepend_timestamp=True) @@ -1624,29 +1653,30 @@ def _perform_dmg_task(self, task: tuple) -> None: # DataFrame assert self.ds_sample is not None if source_cmp not in self.ds_sample.columns.get_level_values('cmp'): - self.log.warn( - f"Source component `{source_cmp}` in the prescribed " - "damage process not found among components in the damage " - "sample. The corresponding part of the damage process is " - "skipped." + self.log.warning( + f'Source component `{source_cmp}` in the prescribed ' + 'damage process not found among components in the damage ' + 'sample. The corresponding part of the damage process is ' + 'skipped.' 
) return - # execute the events pres prescribed in the damage task + # execute the events prescribed in the damage task for source_event, target_infos in events.items(): # events can only be triggered by damage state occurrence if not source_event.startswith('DS'): - raise ValueError( - f"Unable to parse source event in damage " - f"process: `{source_event}`" + msg = ( + f'Unable to parse source event in damage ' + f'process: `{source_event}`' ) + raise ValueError(msg) # get the ID of the damage state that triggers the event ds_source = int(source_event[2:]) # turn the target_infos into a list if it is a single # argument, for consistency if not isinstance(target_infos, list): - target_infos = [target_infos] + target_infos = [target_infos] # noqa: PLW2901 for target_info in target_infos: # get the target component and event type @@ -1655,11 +1685,11 @@ def _perform_dmg_task(self, task: tuple) -> None: if (target_cmp != 'ALL') and ( target_cmp not in self.ds_sample.columns.get_level_values('cmp') ): - self.log.warn( - f"Target component `{target_cmp}` in the prescribed " - "damage process not found among components in the damage " - "sample. The corresponding part of the damage process is " - "skipped." + self.log.warning( + f'Target component `{target_cmp}` in the prescribed ' + 'damage process not found among components in the damage ' + 'sample. The corresponding part of the damage process is ' + 'skipped.' ) continue @@ -1675,10 +1705,11 @@ def _perform_dmg_task(self, task: tuple) -> None: # -1 stands for nan (ints don'ts support nan) else: - raise ValueError( - f"Unable to parse target event in damage " - f"process: `{target_event}`" + msg = ( + f'Unable to parse target event in damage ' + f'process: `{target_event}`' ) + raise ValueError(msg) if match_locations: self._perform_dmg_event_loc( @@ -1700,16 +1731,15 @@ def _perform_dmg_event( ) -> None: """ Perform a damage event. - See `_perform_dmg_task`. + See `_perform_dmg_task`. 
""" - # affected rows assert self.ds_sample is not None row_selection = np.where( # for many instances of source_cmp, we # consider the highest damage state - self.ds_sample[source_cmp].max(axis=1).values # type: ignore + self.ds_sample[source_cmp].max(axis=1).to_numpy() # type: ignore == ds_source )[0] # affected columns @@ -1728,10 +1758,23 @@ def _perform_dmg_event_loc( ) -> None: """ Perform a damage event matching locations. - See `_perform_dmg_task`. - """ + Parameters + ---------- + source_cmp: str + Source component, e.g., `'1_CMP_A'`. The number in the beginning + is used to order the tasks and is not considered here. + ds_source: int + Source damage state. + target_cmp: str + Target component, e.g., `'CMP_B'`. The components that + will be affected when `source_cmp` gets to `ds_source`. + ds_target: int + Target damage state, e.g., `'DS_1'`. The damage state that + is assigned to `target_cmp` when `source_cmp` gets to + `ds_source`. + """ # get locations of source component assert self.ds_sample is not None source_locs = set(self.ds_sample[source_cmp].columns.get_level_values('loc')) @@ -1740,8 +1783,7 @@ def _perform_dmg_event_loc( row_selection = np.where( # for many instances of source_cmp, we # consider the highest damage state - self.ds_sample[source_cmp, loc].max(axis=1).values - == ds_source + self.ds_sample[source_cmp, loc].max(axis=1).to_numpy() == ds_source )[0] # affected columns @@ -1761,14 +1803,16 @@ def _perform_dmg_event_loc( )[0] self.ds_sample.iloc[row_selection, column_selection] = ds_target - def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: + def complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: """ + Complete damage state columns. + Completes the damage sample DataFrame with all possible damage states for each component. Parameters ---------- - dmg_sample : DataFrame + dmg_sample: DataFrame A DataFrame containing the damage state information for each component block in the asset model. 
The columns are MultiIndexed with levels corresponding to component @@ -1793,10 +1837,9 @@ def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: damage states for each component. """ - # get a shortcut for the damage model parameters - DP = self.damage_params - assert DP is not None + dp = self.damage_params + assert dp is not None # Get the header for the results that we can use to identify # cmp-loc-dir-uid sets @@ -1811,20 +1854,20 @@ def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: damaged_components = set(dmg_header.columns.get_level_values('cmp')) # get the number of possible limit states - ls_list = [col for col in DP.columns.unique(level=0) if 'LS' in col] + ls_list = [col for col in dp.columns.unique(level=0) if 'LS' in col] # initialize the result DataFrame res = pd.DataFrame() - # TODO: For the code below, store the number of damage states + # TODO(JVM): For the code below, store the number of damage states # for each component ID as an attribute of the ds_model when # loading the parameters, and then directly access them here # much faster instead of parsing the parameters again. 
# walk through all components that have damage parameters provided - for cmp_id in DP.index: + for cmp_id in dp.index: # get the component-specific parameters - cmp_data = DP.loc[cmp_id] + cmp_data = dp.loc[cmp_id] # and initialize the damage state counter ds_count = 0 @@ -1832,15 +1875,15 @@ def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: # walk through all limit states for the component for ls in ls_list: # check if the given limit state is defined - if not pd.isna(cmp_data[(ls, 'Theta_0')]): + if not pd.isna(cmp_data[ls, 'Theta_0']): # check if there is only one damage state - if pd.isna(cmp_data[(ls, 'DamageStateWeights')]): + if pd.isna(cmp_data[ls, 'DamageStateWeights']): ds_count += 1 else: # or if there are more than one, how many ds_count += len( - cmp_data[(ls, 'DamageStateWeights')].split('|') + cmp_data[ls, 'DamageStateWeights'].split('|') ) # get the list of valid cmp-loc-dir-uid sets @@ -1853,7 +1896,7 @@ def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: # multiindexed column cmp_headers = pd.concat( [cmp_header for ds_i in range(ds_count + 1)], - keys=[str(r) for r in range(0, ds_count + 1)], + keys=[str(r) for r in range(ds_count + 1)], axis=1, ) cmp_headers.columns.names = ['ds', *cmp_headers.columns.names[1::]] @@ -1877,7 +1920,20 @@ def _complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame: def _is_for_ds_model(data: pd.DataFrame) -> bool: """ + Check if data are for `ds_model`. + Determines if the specified damage model parameters are for components modeled with discrete Damage States (DS). + + Parameters + ---------- + data: pd.DataFrame + The data to check. + + Returns + ------- + bool + If the data are for `ds_model`. 
+ """ return 'LS1' in data.columns.get_level_values(0) diff --git a/pelicun/model/demand_model.py b/pelicun/model/demand_model.py index 4b540939b..3b87a4032 100644 --- a/pelicun/model/demand_model.py +++ b/pelicun/model/demand_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,23 +37,22 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines the DemandModel object and its methods. -""" +"""DemandModel object and associated methods.""" from __future__ import annotations -from typing import TYPE_CHECKING + import re -import os from collections import defaultdict +from pathlib import Path +from typing import TYPE_CHECKING, overload + +import numexpr as ne import numpy as np import pandas as pd -import numexpr as ne + +from pelicun import base, file_io, uq from pelicun.model.pelicun_model import PelicunModel -from pelicun import base -from pelicun import uq -from pelicun import file_io if TYPE_CHECKING: from pelicun.assessment import AssessmentBase @@ -96,16 +94,25 @@ class DemandModel(PelicunModel): """ __slots__ = [ - 'marginal_params', + '_RVs', + 'calibrated', 'correlation', 'empirical_data', - 'user_units', - 'calibrated', - '_RVs', + 'marginal_params', 'sample', + 'user_units', ] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: + """ + Instantiate a DemandModel. + + Parameters + ---------- + assessment: Assessment + Parent assessment object. 
+ + """ super().__init__(assessment) self.marginal_params: pd.DataFrame | None = None @@ -117,11 +124,19 @@ def __init__(self, assessment: AssessmentBase): self._RVs: uq.RandomVariableRegistry | None = None self.sample: pd.DataFrame | None = None + @overload def save_sample( - self, filepath: str | None = None, save_units: bool = False + self, filepath: None = None, *, save_units: bool = False + ) -> tuple[pd.DataFrame, pd.Series] | pd.DataFrame: ... + + @overload + def save_sample(self, filepath: str, *, save_units: bool = False) -> None: ... + + def save_sample( + self, filepath: str | None = None, *, save_units: bool = False ) -> None | tuple[pd.DataFrame, pd.Series] | pd.DataFrame: """ - Save demand sample to a csv file or return it in a DataFrame + Save demand sample to a csv file or return it in a DataFrame. Returns ------- @@ -133,13 +148,7 @@ def save_sample( If `save_units` is True, it returns a tuple of the DataFrame and a Series containing the units. - Raises - ------ - IOError - Raises an IOError if there is an issue saving the file to - the specified `filepath`. """ - self.log.div() if filepath is not None: self.log.msg('Saving demand sample...') @@ -147,7 +156,7 @@ def save_sample( assert self.sample is not None res = file_io.save_to_csv( self.sample, - filepath, + Path(filepath) if filepath is not None else None, units=self.user_units, unit_conversion_factors=self._asmnt.unit_conversion_factors, use_simpleindex=(filepath is not None), @@ -162,8 +171,8 @@ def save_sample( # else: assert isinstance(res, pd.DataFrame) - units = res.loc["Units"] - res.drop("Units", inplace=True) + units = res.loc['Units'] + res = res.drop('Units') assert isinstance(units, pd.Series) if save_units: @@ -187,8 +196,10 @@ def load_sample(self, filepath: str | pd.DataFrame) -> None: """ - def parse_header(raw_header): + def parse_header(raw_header: pd.Index[str]) -> pd.Index[str]: """ + Parse and clean header. 
+ Parses and cleans the header of a demand DataFrame from raw multi-level index to a standardized format. @@ -203,7 +214,7 @@ def parse_header(raw_header): Parameters ---------- - raw_header : pd.MultiIndex + raw_header: pd.MultiIndex The original multi-level index (header) of the DataFrame, which may contain an optional event_ID and might have excess whitespace in the labels. @@ -216,24 +227,26 @@ def parse_header(raw_header): index has three levels: 'type', 'loc', and 'dir', representing the type of demand, location, and direction, respectively. + """ - old_MI = raw_header + old_mi = raw_header # The first number (event_ID) in the demand labels is optional and # currently not used. We remove it if it was in the raw data. - if old_MI.nlevels == 4: + num_levels_with_event_id = 4 + if old_mi.nlevels == num_levels_with_event_id: if self._asmnt.log.verbose: self.log.msg( 'Removing event_ID from header...', prepend_timestamp=False ) new_column_index_array = np.array( - [old_MI.get_level_values(i) for i in range(1, 4)] + [old_mi.get_level_values(i) for i in range(1, 4)] ) else: new_column_index_array = np.array( - [old_MI.get_level_values(i) for i in range(3)] + [old_mi.get_level_values(i) for i in range(3)] ) # Remove whitespace to avoid ambiguity @@ -249,12 +262,10 @@ def parse_header(raw_header): # Creating new, cleaned-up header - new_MI = pd.MultiIndex.from_arrays( + return pd.MultiIndex.from_arrays( new_column_index, names=['type', 'loc', 'dir'] ) - return new_MI - self.log.div() self.log.msg('Loading demand data...') @@ -279,19 +290,23 @@ def parse_header(raw_header): 'Removing errors from the raw data...', prepend_timestamp=False ) - error_list = parsed_data.loc[ # type: ignore - :, # type: ignore - idx['ERROR', :, :], # type: ignore - ].values.astype( # type: ignore - bool # type: ignore + error_list = ( + parsed_data.loc[ # type: ignore + :, # type: ignore + idx['ERROR', :, :], # type: ignore + ] + .to_numpy() + .astype( # type: ignore + bool # type: ignore + 
) ) # type: ignore parsed_data = parsed_data.loc[~error_list, :].copy() - parsed_data.drop('ERROR', level=0, axis=1, inplace=True) + parsed_data = parsed_data.drop('ERROR', level=0, axis=1) self.log.msg( - "\nBased on the values in the ERROR column, " - f"{np.sum(error_list)} demand samples were removed.\n", + '\nBased on the values in the ERROR column, ' + f'{np.sum(error_list)} demand samples were removed.\n', prepend_timestamp=False, ) @@ -306,13 +321,15 @@ def parse_header(raw_header): self.log.msg('Demand units successfully parsed.', prepend_timestamp=False) - def estimate_RID( + def estimate_RID( # noqa: N802 self, demands: pd.DataFrame | pd.Series, params: dict, method: str = 'FEMA P58', ) -> pd.DataFrame: """ + Estimate residual inter-story drift (RID). + Estimates residual inter-story drift (RID) realizations based on peak inter-story drift (PID) and other demand parameters using specified methods. @@ -325,16 +342,16 @@ def estimate_RID( Parameters ---------- - demands : DataFrame + demands: DataFrame A DataFrame containing samples of demands, specifically peak inter-story drift (PID) values for various location-direction pairs required for the estimation method. - params : dict + params: dict A dictionary containing parameters required for the estimation method, such as 'yield_drift', which is the drift at which yielding is expected to occur. - method : str, optional + method: str, optional The method used to estimate the RID values. Currently, only 'FEMA P58' is implemented. Defaults to 'FEMA P58'. @@ -360,6 +377,7 @@ def estimate_RID( RID values to model the inherent uncertainty. The method ensures that the RID values do not exceed the corresponding PID values. 
+ """ if method in {'FEMA P58', 'FEMA P-58'}: # method is described in FEMA P-58 Volume 1 Section 5.4 & @@ -367,60 +385,63 @@ def estimate_RID( # the provided demands shall be PID values at various # loc-dir pairs - PID = demands + pid = demands # there's only one parameter needed: the yield drift yield_drift = params['yield_drift'] # three subdomains of demands are identified - small = PID < yield_drift - medium = PID < 4 * yield_drift - large = PID >= 4 * yield_drift + small = yield_drift > pid + medium = 4 * yield_drift > pid + large = 4 * yield_drift <= pid # convert PID to RID in each subdomain - RID = PID.copy() - RID[large] = PID[large] - 3 * yield_drift - RID[medium] = 0.3 * (PID[medium] - yield_drift) - RID[small] = 0.0 + rid = pid.copy() + rid[large] = pid[large] - 3 * yield_drift + rid[medium] = 0.3 * (pid[medium] - yield_drift) + rid[small] = 0.0 # add extra uncertainty to nonzero values rng = self._asmnt.options.rng - eps = rng.normal(scale=0.2, size=RID.shape) - RID[RID > 0] = np.exp(np.log(RID[RID > 0]) + eps) # type: ignore + eps = rng.normal(scale=0.2, size=rid.shape) + rid[rid > 0] = np.exp(np.log(rid[rid > 0]) + eps) # type: ignore # finally, make sure the RID values are never larger than # the PIDs - RID = pd.DataFrame( - np.minimum(PID.values, RID.values), # type: ignore - columns=pd.DataFrame( + rid = pd.DataFrame( + np.minimum(pid.values, rid.values), # type: ignore + columns=pd.DataFrame( # noqa: PD013 1, index=['RID'], - columns=PID.columns, + columns=pid.columns, ) .stack(level=[0, 1]) .index, - index=PID.index, + index=pid.index, ) else: - raise ValueError(f'Invalid method: `{method}`.') + msg = f'Invalid method: `{method}`.' + raise ValueError(msg) - return RID + return rid - def estimate_RID_and_adjust_sample( + def estimate_RID_and_adjust_sample( # noqa: N802 self, params: dict, method: str = 'FEMA P58' ) -> None: """ + Estimate residual inter-story drift (RID) and modifies sample. 
+ Uses `self.estimate_RID` and adjusts the demand sample. See the docstring of the `estimate_RID` method for details. Parameters ---------- - params : dict + params: dict A dictionary containing parameters required for the estimation method, such as 'yield_drift', which is the drift at which yielding is expected to occur. - method : str, optional + method: str, optional The method used to estimate the RID values. Currently, only 'FEMA P58' is implemented. Defaults to 'FEMA P58'. @@ -430,9 +451,9 @@ def estimate_RID_and_adjust_sample( If the method is called before a sample is generated. """ - if self.sample is None: - raise ValueError('Demand model does not have a sample yet.') + msg = 'Demand model does not have a sample yet.' + raise ValueError(msg) sample_tuple = self.save_sample(save_units=True) assert isinstance(sample_tuple, tuple) @@ -452,13 +473,13 @@ def expand_sample( label: str, value: float | np.ndarray, unit: str, - location='0', - direction='1', + location: str = '0', + direction: str = '1', ) -> None: """ - Adds an extra column to the demand sample. + Add an extra column to the demand sample. - The column comtains repeated instances of `value`, is accessed + The column contains repeated instances of `value`, is accessed via the multi-index (`label`-`location`-`direction`), and has units of `unit`. @@ -484,23 +505,24 @@ def expand_sample( """ if self.sample is None: - raise ValueError('Demand model does not have a sample yet.') + msg = 'Demand model does not have a sample yet.' 
+ raise ValueError(msg) sample_tuple = self.save_sample(save_units=True) assert isinstance(sample_tuple, tuple) demand_sample, demand_units = sample_tuple - if isinstance(value, np.ndarray): - value = np.atleast_1d(value) - assert isinstance(value, np.ndarray) - if len(value) != len(demand_sample): - raise ValueError('Incompatible array length.') - demand_sample[(label, location, direction)] = value - demand_units[(label, location, direction)] = unit + assert isinstance(demand_sample, pd.DataFrame) + assert isinstance(demand_units, pd.Series) + if isinstance(value, np.ndarray) and len(value) != len(demand_sample): + msg = 'Incompatible array length.' + raise ValueError(msg) + demand_sample[label, location, direction] = value + demand_units[label, location, direction] = unit demand_sample.loc['Units', :] = demand_units self.load_sample(demand_sample) - def calibrate_model(self, config: dict) -> None: + def calibrate_model(self, config: dict) -> None: # noqa: C901 """ - Calibrate a demand model to describe the raw demand data + Calibrate a demand model to describe the raw demand data. The raw data shall be parsed first to ensure that it follows the schema expected by this method. The calibration settings define the @@ -515,27 +537,28 @@ def calibrate_model(self, config: dict) -> None: settings for the calibration. """ - if self.calibrated: - self.log.warn('DemandModel has been previously calibrated.') + self.log.warning('DemandModel has been previously calibrated.') - def parse_settings(settings, demand_type): - def parse_str_to_float(in_str, context_string): - # pylint: disable = missing-return-type-doc - # pylint: disable = missing-return-doc + def parse_settings( # noqa: C901 + cal_df: pd.DataFrame, settings: dict, demand_type: str + ) -> None: + def parse_str_to_float(in_str: str, context_string: str) -> float: try: out_float = float(in_str) except ValueError: - self.log.warn( - f"Could not parse {in_str} provided as " - f"{context_string}. Using NaN instead." 
+ self.log.warning( + f'Could not parse {in_str} provided as ' + f'{context_string}. Using NaN instead.' ) out_float = np.nan return out_float + demand_sample = self.save_sample() + assert isinstance(demand_sample, pd.DataFrame) active_d_types = demand_sample.columns.get_level_values('type').unique() if demand_type == 'ALL': @@ -546,7 +569,7 @@ def parse_str_to_float(in_str, context_string): for d_type in active_d_types: if d_type.split('_')[0] == demand_type: - cols_lst.append(d_type) + cols_lst.append(d_type) # noqa: PERF401 cols = tuple(cols_lst) @@ -560,13 +583,13 @@ def parse_str_to_float(in_str, context_string): 'TruncateLower', 'TruncateUpper', ): - if lim in settings.keys(): + if lim in settings: val = parse_str_to_float(settings[lim], lim) if not pd.isna(val): cal_df.loc[idx[cols, :, :], lim] = val # scale the censor and truncation limits, if needed - scale_factor = self._asmnt.scale_factor(settings.get('Unit', None)) + scale_factor = self._asmnt.scale_factor(settings.get('Unit')) rows_to_scale = [ 'CensorLower', @@ -574,10 +597,10 @@ def parse_str_to_float(in_str, context_string): 'TruncateLower', 'TruncateUpper', ] - cal_df.loc[idx[cols, :, :], rows_to_scale] *= scale_factor + cal_df.loc[idx[cols, :, :], rows_to_scale] *= scale_factor # type: ignore # load the prescribed additional uncertainty - if 'AddUncertainty' in settings.keys(): + if 'AddUncertainty' in settings: sig_increase = parse_str_to_float( settings['AddUncertainty'], 'AddUncertainty' ) @@ -588,9 +611,11 @@ def parse_str_to_float(in_str, context_string): cal_df.loc[idx[cols, :, :], 'SigIncrease'] = sig_increase - def get_filter_mask(lower_lims, upper_lims): - # pylint: disable=missing-return-doc - # pylint: disable=missing-return-type-doc + def get_filter_mask( + demand_sample: pd.DataFrame, + lower_lims: np.ndarray, + upper_lims: np.ndarray, + ) -> bool: demands_of_interest = demand_sample.iloc[:, pd.notna(upper_lims)] limits_of_interest = upper_lims[pd.notna(upper_lims)] upper_mask = 
np.all(demands_of_interest < limits_of_interest, axis=1) @@ -626,21 +651,21 @@ def get_filter_mask(lower_lims, upper_lims): cal_df['Family'] = cal_df['Family'].astype(str) # start by assigning the default option ('ALL') to every demand column - parse_settings(config['ALL'], 'ALL') + parse_settings(cal_df, config['ALL'], demand_type='ALL') # then parse the additional settings and make the necessary adjustments - for demand_type in config.keys(): + for demand_type in config: # noqa: PLC0206 if demand_type != 'ALL': - parse_settings(config[demand_type], demand_type) + parse_settings(cal_df, config[demand_type], demand_type=demand_type) if self._asmnt.log.verbose: self.log.msg( - "\nCalibration settings successfully parsed:\n" + str(cal_df), + '\nCalibration settings successfully parsed:\n' + str(cal_df), prepend_timestamp=False, ) else: self.log.msg( - "\nCalibration settings successfully parsed:\n", + '\nCalibration settings successfully parsed:\n', prepend_timestamp=False, ) @@ -651,19 +676,19 @@ def get_filter_mask(lower_lims, upper_lims): # Currently, non-empirical demands are assumed to have some level of # correlation, hence, a censored value in any demand triggers the # removal of the entire sample from the population. 
- upper_lims = cal_df.loc[:, 'CensorUpper'].values - lower_lims = cal_df.loc[:, 'CensorLower'].values + upper_lims = cal_df.loc[:, 'CensorUpper'].to_numpy() + lower_lims = cal_df.loc[:, 'CensorLower'].to_numpy() assert isinstance(demand_sample, pd.DataFrame) if ~np.all(pd.isna(np.array([upper_lims, lower_lims]))): - censor_mask = get_filter_mask(lower_lims, upper_lims) + censor_mask = get_filter_mask(demand_sample, lower_lims, upper_lims) censored_count = np.sum(~censor_mask) - demand_sample = demand_sample.loc[censor_mask, :] + demand_sample = pd.DataFrame(demand_sample.loc[censor_mask, :]) self.log.msg( - "\nBased on the provided censoring limits, " - f"{censored_count} samples were censored.", + '\nBased on the provided censoring limits, ' + f'{censored_count} samples were censored.', prepend_timestamp=False, ) else: @@ -673,21 +698,21 @@ def get_filter_mask(lower_lims, upper_lims): # If yes, that suggests an error either in the samples or the # configuration. We handle such errors gracefully: the analysis is not # terminated, but we show an error in the log file. 
- upper_lims = cal_df.loc[:, 'TruncateUpper'].values - lower_lims = cal_df.loc[:, 'TruncateLower'].values + upper_lims = cal_df.loc[:, 'TruncateUpper'].to_numpy() + lower_lims = cal_df.loc[:, 'TruncateLower'].to_numpy() assert isinstance(demand_sample, pd.DataFrame) if ~np.all(pd.isna(np.array([upper_lims, lower_lims]))): - truncate_mask = get_filter_mask(lower_lims, upper_lims) + truncate_mask = get_filter_mask(demand_sample, lower_lims, upper_lims) truncated_count = np.sum(~truncate_mask) if truncated_count > 0: - demand_sample = demand_sample.loc[truncate_mask, :] + demand_sample = pd.DataFrame(demand_sample.loc[truncate_mask, :]) self.log.msg( - "\nBased on the provided truncation limits, " - f"{truncated_count} samples were removed before demand " - "calibration.", + '\nBased on the provided truncation limits, ' + f'{truncated_count} samples were removed before demand ' + 'calibration.', prepend_timestamp=False, ) @@ -698,7 +723,7 @@ def get_filter_mask(lower_lims, upper_lims): empirical_edps = [] for edp in cal_df.index: if cal_df.loc[edp, 'Family'] == 'empirical': - empirical_edps.append(edp) + empirical_edps.append(edp) # noqa: PERF401 assert isinstance(demand_sample, pd.DataFrame) if empirical_edps: @@ -712,18 +737,18 @@ def get_filter_mask(lower_lims, upper_lims): if self._asmnt.log.verbose: self.log.msg( - f"\nDemand data used for calibration:\n{demand_sample}", + f'\nDemand data used for calibration:\n{demand_sample}', prepend_timestamp=False, ) # fit the joint distribution self.log.msg( - "\nFitting the prescribed joint demand distribution...", + '\nFitting the prescribed joint demand distribution...', prepend_timestamp=False, ) demand_theta, demand_rho = uq.fit_distribution_to_sample( - raw_samples=demand_sample.values.T, + raw_samples=demand_sample.to_numpy().T, distribution=cal_df.loc[:, 'Family'].values, # type: ignore censored_count=censored_count, detection_limits=cal_df.loc[ # type: ignore @@ -738,7 +763,7 @@ def get_filter_mask(lower_lims, 
upper_lims): ) # fit the joint distribution self.log.msg( - "\nCalibration successful, processing results...", + '\nCalibration successful, processing results...', prepend_timestamp=False, ) @@ -747,12 +772,12 @@ def get_filter_mask(lower_lims, upper_lims): # increase the variance of the marginal distributions, if needed if ~np.all(pd.isna(model_params.loc[:, 'SigIncrease'].values)): - self.log.msg("\nIncreasing demand variance...", prepend_timestamp=False) + self.log.msg('\nIncreasing demand variance...', prepend_timestamp=False) sig_inc = np.nan_to_num( model_params.loc[:, 'SigIncrease'].values, # type: ignore ) - sig_0 = model_params.loc[:, 'Theta_1'].values + sig_0 = model_params.loc[:, 'Theta_1'].to_numpy() model_params.loc[:, 'Theta_1'] = np.sqrt( sig_0**2.0 + sig_inc**2.0, # type: ignore @@ -770,7 +795,7 @@ def get_filter_mask(lower_lims, upper_lims): self.marginal_params = model_params self.log.msg( - "\nCalibrated demand model marginal distributions:\n" + '\nCalibrated demand model marginal distributions:\n' + str(model_params), prepend_timestamp=False, ) @@ -781,7 +806,7 @@ def get_filter_mask(lower_lims, upper_lims): ) self.log.msg( - "\nCalibrated demand model correlation matrix:\n" + '\nCalibrated demand model correlation matrix:\n' + str(self.correlation), prepend_timestamp=False, ) @@ -789,20 +814,16 @@ def get_filter_mask(lower_lims, upper_lims): self.calibrated = True def save_model(self, file_prefix: str) -> None: - """ - Save parameters of the demand model to a set of csv files - - """ - + """Save parameters of the demand model to a set of csv files.""" self.log.div() self.log.msg('Saving demand model...') # save the correlation and empirical data - file_io.save_to_csv(self.correlation, file_prefix + '_correlation.csv') + file_io.save_to_csv(self.correlation, Path(file_prefix + '_correlation.csv')) if self.empirical_data is not None: file_io.save_to_csv( self.empirical_data, - file_prefix + '_empirical.csv', + Path(file_prefix + 
'_empirical.csv'), units=self.user_units, unit_conversion_factors=self._asmnt.unit_conversion_factors, log=self._asmnt.log, @@ -820,7 +841,7 @@ def save_model(self, file_prefix: str) -> None: file_io.save_to_csv( marginal_params_user_units, - file_prefix + '_marginals.csv', + Path(file_prefix + '_marginals.csv'), orientation=1, log=self._asmnt.log, ) @@ -847,7 +868,6 @@ def load_model(self, data_source: str | dict) -> None: If the data source type is invalid. """ - self.log.div() self.log.msg('Loading demand model...') @@ -862,11 +882,13 @@ def load_model(self, data_source: str | dict) -> None: empirical_data_source = data_source + '_empirical.csv' correlation_data_source = data_source + '_correlation.csv' else: - raise TypeError(f'Invalid data_source type: {type(data_source)}.') + msg = f'Invalid data_source type: {type(data_source)}.' + raise TypeError(msg) if empirical_data_source is not None: - if isinstance(empirical_data_source, str) and os.path.exists( - empirical_data_source + if ( + isinstance(empirical_data_source, str) + and Path(empirical_data_source).exists() ): empirical_data = file_io.load_data( empirical_data_source, @@ -888,8 +910,12 @@ def load_model(self, data_source: str | dict) -> None: ) assert isinstance(correlation, pd.DataFrame) self.correlation = correlation - self.correlation.index.set_names(['type', 'loc', 'dir'], inplace=True) - self.correlation.columns.set_names(['type', 'loc', 'dir'], inplace=True) + self.correlation.index = self.correlation.index.set_names( + ['type', 'loc', 'dir'] + ) + self.correlation.columns = self.correlation.columns.set_names( + ['type', 'loc', 'dir'] + ) else: self.correlation = None @@ -905,7 +931,9 @@ def load_model(self, data_source: str | dict) -> None: assert isinstance(marginal_params, pd.DataFrame) assert isinstance(units, pd.Series) - marginal_params.index.set_names(['type', 'loc', 'dir'], inplace=True) + marginal_params.index = marginal_params.index.set_names( + ['type', 'loc', 'dir'] + ) 
marginal_params = self._convert_marginal_params( marginal_params.copy(), units @@ -916,34 +944,27 @@ def load_model(self, data_source: str | dict) -> None: self.log.msg('Demand model successfully loaded.', prepend_timestamp=False) - def _create_RVs(self, preserve_order: bool = False) -> None: - """ - Create a random variable registry for the joint distribution of demands. - - """ - + def _create_RVs(self, *, preserve_order: bool = False) -> None: # noqa: N802 + """Create a random variable registry for the joint distribution of demands.""" assert self.marginal_params is not None # initialize the registry - RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) # add a random variable for each demand variable for rv_params in self.marginal_params.itertuples(): edp = rv_params.Index rv_tag = f'EDP-{edp[0]}-{edp[1]}-{edp[2]}' # type: ignore - family = getattr(rv_params, "Family", 'deterministic') + family = getattr(rv_params, 'Family', 'deterministic') if family == 'empirical': - if preserve_order: - dist_family = 'coupled_empirical' - else: - dist_family = 'empirical' + dist_family = 'coupled_empirical' if preserve_order else 'empirical' # empirical RVs need the data points - RV_reg.add_RV( + rv_reg.add_RV( uq.rv_class_map(dist_family)( name=rv_tag, - raw_samples=self.empirical_data.loc[ # type: ignore + theta=self.empirical_data.loc[ # type: ignore :, # type: ignore edp, ].values, @@ -952,22 +973,22 @@ def _create_RVs(self, preserve_order: bool = False) -> None: else: # all other RVs need parameters of their distributions - RV_reg.add_RV( + rv_reg.add_RV( uq.rv_class_map(family)( name=rv_tag, theta=[ # type: ignore - getattr(rv_params, f"Theta_{t_i}", np.nan) + getattr(rv_params, f'Theta_{t_i}', np.nan) for t_i in range(3) ], truncation_limits=[ - getattr(rv_params, f"Truncate{side}", np.nan) - for side in ("Lower", "Upper") + getattr(rv_params, f'Truncate{side}', np.nan) + for side in ('Lower', 
'Upper') ], ) ) self.log.msg( - f"\n{self.marginal_params.shape[0]} random variables created.", + f'\n{self.marginal_params.shape[0]} random variables created.', prepend_timestamp=False, ) @@ -975,32 +996,33 @@ def _create_RVs(self, preserve_order: bool = False) -> None: if self.correlation is not None: rv_set_tags = [ f'EDP-{edp[0]}-{edp[1]}-{edp[2]}' - for edp in self.correlation.index.values + for edp in self.correlation.index.to_numpy() ] - RV_reg.add_RV_set( + rv_reg.add_RV_set( uq.RandomVariableSet( 'EDP_set', - list(RV_reg.RVs(rv_set_tags).values()), + list(rv_reg.RVs(rv_set_tags).values()), self.correlation.values, ) ) self.log.msg( - f"\nCorrelations between {len(rv_set_tags)} random variables " - "successfully defined.", + f'\nCorrelations between {len(rv_set_tags)} random variables ' + 'successfully defined.', prepend_timestamp=False, ) - self._RVs = RV_reg + self._RVs = rv_reg def clone_demands(self, demand_cloning: dict) -> None: """ - Clones demands. This means copying over columns of the - original demand sample and assigning given names to them. The - columns to be copied over and the names to assign to the - copies are defined as the keys and values of the - `demand_cloning` dictionary, respectively. + Clone demands. + + Copies over columns of the original demand sample and + assigns given names to them. The columns to be copied over + and the names to be assigned to the copies are defined as the keys + and values of the `demand_cloning` dictionary. The method modifies `sample` inplace. Parameters @@ -1021,7 +1043,6 @@ def clone_demands(self, demand_cloning: dict) -> None: In multiple instances of invalid demand_cloning entries. """ - # it's impossible to have duplicate keys, because # demand_cloning is a dictionary. 
new_columns_list = demand_cloning.values() @@ -1036,12 +1057,11 @@ def clone_demands(self, demand_cloning: dict) -> None: for new_columns in new_columns_list: flat_list.extend(new_columns) if len(set(flat_list)) != len(flat_list): - raise ValueError('Duplicate entries in demand cloning configuration.') + msg = 'Duplicate entries in demand cloning configuration.' + raise ValueError(msg) # turn the config entries to tuples - def turn_to_tuples(demand_cloning): - # pylint: disable=missing-return-doc - # pylint: disable=missing-return-type-doc + def turn_to_tuples(demand_cloning: dict) -> dict: demand_cloning_tuples = {} for key, values in demand_cloning.items(): demand_cloning_tuples[tuple(key.split('-'))] = [ @@ -1057,13 +1077,13 @@ def turn_to_tuples(demand_cloning): assert self.sample is not None for column in demand_cloning: if column not in self.sample.columns: - warn_columns.append(column) + warn_columns.append(column) # noqa: PERF401 if warn_columns: warn_columns = ['-'.join(x) for x in warn_columns] - self.log.warn( - "The demand cloning configuration lists " + self.log.warning( + 'The demand cloning configuration lists ' "columns that are not present in the original demand sample's " - f"columns: {warn_columns}." + f'columns: {warn_columns}.' ) # we iterate over the existing columns of the sample and try @@ -1094,6 +1114,8 @@ def turn_to_tuples(demand_cloning): def generate_sample(self, config: dict) -> None: """ + Generate the demand sample. + Generates a sample of random variables (RVs) based on the specified configuration for demand modeling. @@ -1106,7 +1128,7 @@ def generate_sample(self, config: dict) -> None: Parameters ---------- - config : dict + config: dict A dictionary containing configuration options for the sample generation. Key options include: * 'SampleSize': The number of samples to generate. 
@@ -1142,14 +1164,15 @@ def generate_sample(self, config: dict) -> None: >>> model.generate_sample(config) # This will generate 1000 realizations of demand variables # with the specified configuration. - """ + """ if self.marginal_params is None: - raise ValueError( + msg = ( 'Model parameters have not been specified. Either ' 'load parameters from a file or calibrate the ' 'model using raw demand data.' ) + raise ValueError(msg) self.log.div() self.log.msg('Generating sample from demand variables...') @@ -1167,8 +1190,8 @@ def generate_sample(self, config: dict) -> None: assert self._RVs is not None assert self._RVs.RV_sample is not None sample = pd.DataFrame(self._RVs.RV_sample) - sample.sort_index(axis=0, inplace=True) - sample.sort_index(axis=1, inplace=True) + sample = sample.sort_index(axis=0) + sample = sample.sort_index(axis=1) sample_mi = base.convert_to_MultiIndex(sample, axis=1)['EDP'] assert isinstance(sample_mi, pd.DataFrame) @@ -1181,7 +1204,7 @@ def generate_sample(self, config: dict) -> None: self.clone_demands(config['DemandCloning']) self.log.msg( - f"\nSuccessfully generated {sample_size} realizations.", + f'\nSuccessfully generated {sample_size} realizations.', prepend_timestamp=False, ) @@ -1192,27 +1215,24 @@ def _get_required_demand_type( demand_offset: dict | None = None, ) -> dict: """ - Returns the id of the demand needed to calculate damage or - loss of a component. - - This method returns the demand type and its properties - required to calculate the the damage or loss of a - component. The properties include whether the demand is - directional, the offset, and the type of the demand. The - method takes as input a dataframe `PGB` that contains + Get the required demand type for the components. + + Returns the demand type and its properties required to calculate + the the damage or loss of a component. The properties include + whether the demand is directional, the offset, and the type of the + demand. 
The method takes as input a dataframe `PGB` that contains information about the component groups in the asset. For each - performance group PG in the PGB dataframe, the method - retrieves the relevant parameters from the model_params - dataframe and parses the demand type into its properties. If - the demand type has a subtype, the method splits it and adds - the subtype to the demand type to form the EDP type. The - method also considers the default offset for the demand type, - if it is specified in the options attribute of the assessment, - and adds the offset to the EDP. If the demand is directional, - the direction is added to the EDP. The method collects all the - unique EDPs for each component group and returns them as a - dictionary where each key is an EDP and its value is a list of - component groups that require that EDP. + performance group PG in the PGB dataframe, the method retrieves + the relevant parameters from the model_params dataframe and parses + the demand type into its properties. If the demand type has a + subtype, the method splits it and adds the subtype to the demand + type to form the EDP type. The method also considers the default + offset for the demand type, if it is specified in the options + attribute of the assessment, and adds the offset to the EDP. If + the demand is directional, the direction is added to the EDP. The + method collects all the unique EDPs for each component group and + returns them as a dictionary where each key is an EDP and its + value is a list of component groups that require that EDP. Parameters ---------- @@ -1235,8 +1255,13 @@ def _get_required_demand_type( corresponding value is a list of tuples (component_id, location, direction) - """ + Raises + ------ + ValueError + When a negative value is used for `loc`. Currently not + supported. + """ model_parameters = model_parameters.sort_index(axis=1) # Assign default demand_offset to empty dict. 
@@ -1246,7 +1271,6 @@ def _get_required_demand_type( required_edps = defaultdict(list) for pg in pgb.index: - cmp = pg[0] # Utility Demand: if there is an `Expression`, then load the @@ -1283,7 +1307,6 @@ def _get_required_demand_type( edps = [] for demand_parameters in demand_parameters_list: - demand_type = demand_parameters[0] offset = demand_parameters[1] directional = demand_parameters[2] @@ -1301,17 +1324,17 @@ def _get_required_demand_type( demand_type = base.EDP_to_demand_type[demand_type] # Concatenate the demand type and subtype to form the # EDP type - EDP_type = f'{demand_type}_{subtype}' + edp_type = f'{demand_type}_{subtype}' else: # If there is no subtype, convert the demand type to # the corresponding EDP type using # `base.EDP_to_demand_type` demand_type = base.EDP_to_demand_type[demand_type] # Assign the EDP type to be equal to the demand type - EDP_type = demand_type + edp_type = demand_type # Consider the default offset, if needed - if demand_type in demand_offset.keys(): + if demand_type in demand_offset: # If the demand type has a default offset in # `demand_offset`, add the offset # to the default offset @@ -1323,25 +1346,19 @@ def _get_required_demand_type( offset = int(offset) # type: ignore # Determine the direction - if directional: - # If the demand is directional, use the third element - # of the `PG` tuple as the direction - direction = pg[2] - else: - # If the demand is not directional, use '0' as the - # direction - direction = '0' + direction = pg[2] if directional else '0' # Concatenate the EDP type, offset, and direction to form # the EDP key - edp = f"{EDP_type}-{str(int(pg[1]) + offset)}-{direction}" + edp = f'{edp_type}-{int(pg[1]) + offset!s}-{direction}' if int(pg[1]) + offset < 0: - raise ValueError( + msg = ( f'Negative location encountered for component ' f'(cmp, loc, dir, uid)=`{pg}`. Would require `{edp}`. ' f'Please update the location of the component.' 
) + raise ValueError(msg) edps.append(edp) @@ -1349,7 +1366,7 @@ def _get_required_demand_type( # Add the current PG (performance group) to the list of # PGs associated with the current EDP key - required_edps[(edps_t, expression)].append(pg) + required_edps[edps_t, expression].append(pg) # Return the required EDPs return required_edps @@ -1393,7 +1410,7 @@ def _assemble_required_demand_data( Returns ------- - demand_dict : dict + demand_dict: dict A dictionary of assembled demand data for calculation Raises @@ -1402,19 +1419,15 @@ def _assemble_required_demand_data( If demand data for a given EDP cannot be found """ - demand_dict = {} for edps, expression in required_edps: - edp_values = {} for edp in edps: - edp_type, location, direction = edp.split('-') if direction == '0': - # non-directional demand = ( demand_sample.loc[ @@ -1422,7 +1435,7 @@ def _assemble_required_demand_data( (edp_type, location), ] .max(axis=1) - .values + .to_numpy() ) if edp_type in nondirectional_multipliers: @@ -1432,18 +1445,18 @@ def _assemble_required_demand_data( multiplier = nondirectional_multipliers['ALL'] else: - raise ValueError( - f"Peak orthogonal EDP multiplier " - f"for non-directional demand " - f"calculation of `{edp_type}` not specified." + msg = ( + f'Peak orthogonal EDP multiplier ' + f'for non-directional demand ' + f'calculation of `{edp_type}` not specified.' 
) + raise ValueError(msg) - demand = demand * multiplier + demand *= multiplier else: - # directional - demand = demand_sample[(edp_type, location, direction)].values + demand = demand_sample[edp_type, location, direction].to_numpy() edp_values[edp] = demand @@ -1452,24 +1465,26 @@ def _assemble_required_demand_data( # build a dict of values value_dict = {} for i, edp_value in enumerate(edp_values.values()): - value_dict[f'X{i+1}'] = edp_value + value_dict[f'X{i + 1}'] = edp_value demand = ne.evaluate( _clean_up_expression(expression), local_dict=value_dict ) - demand_dict[(edps, expression)] = demand + demand_dict[edps, expression] = demand return demand_dict def _clean_up_expression(expression: str) -> str: """ + Clean up a mathematical expression in a string. + Cleans up the given mathematical expression by ensuring it contains only allowed characters and replaces the caret (^) exponentiation operator with the double asterisk (**) operator. Parameters ---------- - expression : str + expression: str The mathematical expression to clean up. Returns @@ -1496,18 +1511,21 @@ def _clean_up_expression(expression: str) -> str: ... "for x in i.repeat(0)]" ... ) Traceback (most recent call last): ... + """ allowed_chars = re.compile(r'^[0-9a-zA-Z\^\+\-\*/\(\)\s]*$') if not bool(allowed_chars.match(expression)): - raise ValueError(f'Invalid expression: {expression}') + msg = f'Invalid expression: {expression}' + raise ValueError(msg) # replace exponantiation with `^` with the native `**` in case `^` # was used. But please use `**`.. - expression = expression.replace('^', '**') - return expression + return expression.replace('^', '**') def _verify_edps_available(available_edps: dict, required: set) -> None: """ + Verify EDP availability. + Verifies that the required EDPs are available and raises appropriate errors otherwise. 
@@ -1533,19 +1551,20 @@ def _verify_edps_available(available_edps: dict, required: set) -> None: for edp in edps: edp_type, location, direction = edp.split('-') if (edp_type, location) not in available_edps: - raise ValueError( + msg = ( f'Unable to locate `{edp_type}` at location ' f'{location} in demand sample.' ) + raise ValueError(msg) # if non-directional demand is requested, ensure there # are entries (all directions accepted) - num_entries = len(available_edps[(edp_type, location)]) + num_entries = len(available_edps[edp_type, location]) if edp[2] == '0' and num_entries == 0: - raise ValueError( + msg = ( f'Unable to locate any `{edp_type}` ' f'at location {location} and direction {direction}.' ) + raise ValueError(msg) if edp[2] != '0' and num_entries == 0: - raise ValueError( - f'Unable to locate `{edp_type}-{location}-{direction}`.' - ) + msg = f'Unable to locate `{edp_type}-{location}-{direction}`.' + raise ValueError(msg) diff --git a/pelicun/model/loss_model.py b/pelicun/model/loss_model.py index 0bcdf9531..7c205081b 100644 --- a/pelicun/model/loss_model.py +++ b/pelicun/model/loss_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,30 +37,33 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines Loss model objects and their methods. 
-""" +"""Loss model objects and associated methods.""" from __future__ import annotations -from typing import TYPE_CHECKING -from typing import Any -from collections.abc import Callable + +from abc import ABC, abstractmethod from collections import defaultdict from itertools import product +from pathlib import Path +from typing import TYPE_CHECKING, Any, overload + import numpy as np import pandas as pd from scipy.interpolate import RegularGridInterpolator + +from pelicun import base, file_io, uq +from pelicun.model.demand_model import ( + _assemble_required_demand_data, + _get_required_demand_type, + _verify_edps_available, +) from pelicun.model.pelicun_model import PelicunModel -from pelicun.model.demand_model import _get_required_demand_type -from pelicun.model.demand_model import _assemble_required_demand_data -from pelicun.model.demand_model import _verify_edps_available -from pelicun import base -from pelicun import uq -from pelicun import file_io -from pelicun.warnings import InconsistentUnitsError +from pelicun.pelicun_warnings import InconsistentUnitsError if TYPE_CHECKING: + from collections.abc import Callable + from pelicun.assessment import AssessmentBase idx = base.idx @@ -76,16 +78,16 @@ class LossModel(PelicunModel): """ - __slots__ = ['ds_model', 'lf_model', 'dv_units'] + __slots__ = ['ds_model', 'dv_units', 'lf_model'] def __init__( self, assessment: AssessmentBase, decision_variables: tuple[str, ...] = ('Carbon', 'Cost', 'Energy', 'Time'), dv_units: dict[str, str] | None = None, - ): + ) -> None: """ - Initializes LossModel objects. + Initialize LossModel objects. Parameters ---------- @@ -107,7 +109,7 @@ def __init__( self.dv_units = dv_units @property - def sample(self): + def sample(self) -> pd.DataFrame | None: """ Combines the samples of the ds_model and lf_model sub-models. @@ -117,7 +119,6 @@ def sample(self): The combined loss sample. 
""" - # Handle `None` cases if self.ds_model.sample is None and self.lf_model.sample is None: @@ -142,30 +143,27 @@ def sample(self): new_multiindex = pd.MultiIndex.from_frame(new_index) self.lf_model.sample.columns = new_multiindex - combined = pd.concat((self.ds_model.sample, self.lf_model.sample), axis=1) - - return combined + return pd.concat((self.ds_model.sample, self.lf_model.sample), axis=1) @property - def decision_variables(self): + def decision_variables(self) -> tuple[str, ...]: """ - Retrieves the decision variables to be used in the loss - calculations. + Retrieve the decision variables. Returns ------- tuple Decision variables. + """ # pick the object from one of the models # it's the same for the other(s). return self.ds_model.decision_variables @decision_variables.setter - def decision_variables(self, decision_variables): + def decision_variables(self, decision_variables: tuple[str, ...]) -> None: """ - Sets the decision variables to be used in the loss - calculations. + Set the decision variables. Supported: {`Cost`, `Time`, `Energy`, `Carbon`}. Could also be any other string, as long as the provided loss @@ -182,9 +180,10 @@ def add_loss_map( loss_map_policy: str | None = None, ) -> None: """ - Add a loss map to the loss model. A loss map defines what loss - parameter definition should be used for each component ID in - the asset model. + Add a loss map to the loss model. + + A loss map defines what loss parameter definition should be + used for each component ID in the asset model. Parameters ---------- @@ -207,15 +206,13 @@ def add_loss_map( If both arguments are None. """ - self.log.msg('Loading loss map...') # If no loss map is provided and no default is requested, # there is no loss map and we can't proceed. if loss_map_path is None and loss_map_policy is None: - raise ValueError( - 'Please provide a loss map and/or a loss map extension policy.' - ) + msg = 'Please provide a loss map and/or a loss map extension policy.' 
+ raise ValueError(msg) # get a list of unique component IDs cmp_set = set(self._asmnt.asset.list_unique_component_ids()) @@ -233,7 +230,7 @@ def add_loss_map( assert isinstance(loss_map, pd.DataFrame) # if np.any(['DMG' in x for x in loss_map.index]): # type: ignore - self.log.warn( + self.log.warning( 'The `DMG-` flag in the loss_map index is deprecated ' 'and no longer necessary. ' 'Please do not prepend `DMG-` before the component ' @@ -259,9 +256,10 @@ def add_loss_map( # Don't do anything. pass - # TODO: add other loss map policies. + # TODO(AZ): add other loss map policies. else: - raise ValueError(f'Unknown loss map policy: `{loss_map_policy}`.') + msg = f'Unknown loss map policy: `{loss_map_policy}`.' + raise ValueError(msg) # Assign the loss map to the available loss models self._loss_map = loss_map @@ -274,11 +272,8 @@ def load_model( loss_map: str | pd.DataFrame, decision_variables: tuple[str, ...] | None = None, ) -> None: - """ - - - """ - self.log.warn( + """.""" + self.log.warning( '`load_model` is deprecated and will be dropped in ' 'future versions of pelicun. ' 'Please use `load_model_parameters` instead.' @@ -303,26 +298,21 @@ def load_model_parameters( prior elements in the list take precedence over the same parameters in subsequent data paths. I.e., place the Default datasets in the back. - - Raises - ------ - ValueError - If the method can't parse the loss parameters in the - specified paths. - InconsistentUnitsError - If there are different units used for the same decision - variable any of the data paths. + decision_variables: tuple + Defines the decision variables to be included in the loss + calculations. Defaults to those supported, but fewer can be + used if desired. When fewer are used, the loss parameters for + those not used will not be required. 
""" - if decision_variables is not None: # - self.decision_variables = set(decision_variables) - self.log.warn( + self.decision_variables = decision_variables + self.log.warning( 'The `decision_variables` argument has been removed. ' 'Please set your desired decision variables like so: ' '{assessment object}.loss.decision_variables ' - '= (\'dv1\', \'dv2\', ...) before calling ' + "= ('dv1', 'dv2', ...) before calling " '{assessment object}.add_loss_map().' ) @@ -337,36 +327,7 @@ def load_model_parameters( # for data_path in data_paths: - if 'bldg_repair_DB' in data_path: - data_path = data_path.replace('bldg_repair_DB', 'loss_repair_DB') - self.log.warn( - '`bldg_repair_DB` is deprecated and will ' - 'be dropped in future versions of pelicun. ' - 'Please use `loss_repair_DB` instead.' - ) - data = file_io.load_data( - data_path, None, orientation=1, reindex=False, log=self._asmnt.log - ) - assert isinstance(data, pd.DataFrame) - - # Check for unit consistency - data.index.names = ['cmp', 'dv'] - units_isolated = data.reset_index()[[('dv', ''), ('DV', 'Unit')]] - units_isolated.columns = ['dv', 'Units'] - units_isolated_grp = units_isolated.groupby('dv')['Units'] - unit_counts = units_isolated_grp.nunique() - more_than_one = unit_counts[unit_counts > 1] - if not more_than_one.empty: - raise InconsistentUnitsError(file=data_path) - - # determine if the loss model parameters are for damage - # states or loss functions - if _is_for_ds_model(data): - self.ds_model._load_model_parameters(data) - elif _is_for_lf_model(data): - self.lf_model._load_model_parameters(data) - else: - raise ValueError(f'Invalid loss model parameters: {data_path}') + self._load_from_data_path(data_path) self.log.msg( 'Loss model parameters loaded successfully.', prepend_timestamp=False @@ -380,14 +341,15 @@ def load_model_parameters( 'Removing unused loss model parameters.', prepend_timestamp=False ) + assert self._loss_map is not None for loss_model in self._loss_models: # drop unused loss 
parameter definitions - loss_model._drop_unused_loss_parameters(self._loss_map) + loss_model.drop_unused_loss_parameters(self._loss_map) # remove components with incomplete loss parameters - loss_model._remove_incomplete_components() + loss_model.remove_incomplete_components() # drop unused damage state columns - self.ds_model._drop_unused_damage_states() + self.ds_model.drop_unused_damage_states() # # obtain DV units @@ -395,16 +357,16 @@ def load_model_parameters( dv_units: dict = {} if self.ds_model.loss_params is not None: dv_units.update( - self.ds_model.loss_params[('DV', 'Unit')] + self.ds_model.loss_params['DV', 'Unit'] .groupby(level=[1]) - .agg(lambda x: x.value_counts().index[0]) + .first() .to_dict() ) if self.lf_model.loss_params is not None: dv_units.update( - self.lf_model.loss_params[('DV', 'Unit')] + self.lf_model.loss_params['DV', 'Unit'] .groupby(level=[1]) - .agg(lambda x: x.value_counts().index[0]) + .first() .to_dict() ) self.dv_units = dv_units @@ -417,7 +379,7 @@ def load_model_parameters( 'Converting loss model parameter units.', prepend_timestamp=False ) for loss_model in self._loss_models: - loss_model._convert_loss_parameter_units() + loss_model.convert_loss_parameter_units() # # verify loss parameter availability @@ -430,6 +392,39 @@ def load_model_parameters( ) self._ensure_loss_parameter_availability() + def _load_from_data_path(self, data_path: str | pd.DataFrame) -> None: + if 'bldg_repair_DB' in data_path: + data_path = data_path.replace('bldg_repair_DB', 'loss_repair_DB') + self.log.warning( + '`bldg_repair_DB` is deprecated and will ' + 'be dropped in future versions of pelicun. ' + 'Please use `loss_repair_DB` instead.' 
+ ) + data = file_io.load_data( + data_path, None, orientation=1, reindex=False, log=self._asmnt.log + ) + assert isinstance(data, pd.DataFrame) + + # Check for unit consistency + data.index.names = ['cmp', 'dv'] + units_isolated = data.reset_index()[[('dv', ''), ('DV', 'Unit')]] + units_isolated.columns = pd.Index(['dv', 'Units']) + units_isolated_grp = units_isolated.groupby('dv')['Units'] + unit_counts = units_isolated_grp.nunique() + more_than_one = unit_counts[unit_counts > 1] + if not more_than_one.empty: + raise InconsistentUnitsError + + # determine if the loss model parameters are for damage + # states or loss functions + if _is_for_ds_model(data): + self.ds_model.load_model_parameters(data) + elif _is_for_lf_model(data): + self.lf_model.load_model_parameters(data) + else: + msg = f'Invalid loss model parameters: {data_path}' + raise ValueError(msg) + def calculate(self) -> None: """ Calculate the loss of each component block. @@ -462,18 +457,19 @@ def calculate(self) -> None: cmp_marginal_params = self._asmnt.asset.cmp_marginal_params assert cmp_marginal_params is not None if self._asmnt.damage.ds_model.sample is not None: - # TODO: FIND A WAY to avoid making a copy of this. + # TODO(JVM): FIND A WAY to avoid making a copy of this. dmg_quantities = self._asmnt.damage.ds_model.sample.copy() if len(demand) != len(dmg_quantities): - raise ValueError( + msg = ( f'The demand sample contains {len(demand)} realizations, ' f'but the damage sample contains {len(dmg_quantities)}. ' f'Loss calculation cannot proceed when ' f'these numbers are different. 
' ) - self.ds_model._calculate(dmg_quantities) + raise ValueError(msg) + self.ds_model.calculate(dmg_quantities) - self.lf_model._calculate( + self.lf_model.calculate( demand, cmp_sample, cmp_marginal_params, @@ -481,31 +477,31 @@ def calculate(self) -> None: nondirectional_multipliers, ) - self.log.msg("Loss calculation successful.") + self.log.msg('Loss calculation successful.') def consequence_scaling(self, scaling_specification: str) -> None: """ + Apply scale factors to losses. + Applies scale factors to the loss sample according to the - given scaling specification. - - The scaling specification should be a path to a CSV file. It - should contain a `Decision Variable` column with a specified - decision variable in each row. Other optional columns are - `Component`, `Location`, `Direction`. Each row acts as an - independent scaling operation, with the scale factor defined - in the `Scale Factor` column, which is required. If any - value is missing in the optional columns, it is assumed that - the scale factor should be applied to all entries of the - loss sample where the other column values match. For example, - if the specification has a single row with `Decision Variable` - set to 'Cost', `Scale Factor` set to 2.0, and no other - columns, this will double the 'Cost' DV. If instead `Location` - was also set to `1`, it would double the Cost of all - components that have that location. The columns `Location` and - `Direction` can contain ranges, like this: `1--3` means - `1`, `2`, and `3`. If a range is used in both `Location` and - `Direction`, the factor of that row will be applied once to - all combinations. + given scaling specification. The scaling specification should + be a path to a CSV file. It should contain a `Decision + Variable` column with a specified decision variable in each + row. Other optional columns are `Component`, `Location`, + `Direction`. 
Each row acts as an independent scaling + operation, with the scale factor defined in the `Scale Factor` + column, which is required. If any value is missing in the + optional columns, it is assumed that the scale factor should + be applied to all entries of the loss sample where the other + column values match. For example, if the specification has a + single row with `Decision Variable` set to 'Cost', `Scale + Factor` set to 2.0, and no other columns, this will double the + 'Cost' DV. If instead `Location` was also set to `1`, it would + double the Cost of all components that have that location. The + columns `Location` and `Direction` can contain ranges, like + this: `1--3` means `1`, `2`, and `3`. If a range is used in + both `Location` and `Direction`, the factor of that row will + be applied once to all combinations. Parameters ---------- @@ -518,7 +514,6 @@ def consequence_scaling(self, scaling_specification: str) -> None: If required columns are missing or contain NaNs. """ - # Specify expected dtypes from the start. dtypes = { 'Decision Variable': 'str', @@ -534,18 +529,20 @@ def consequence_scaling(self, scaling_specification: str) -> None: 'Decision Variable' not in scaling_specification_df.columns or scaling_specification_df['Decision Variable'].isna().any() ): - raise ValueError( + msg = ( 'The `Decision Variable` column is missing ' 'from the scaling specification or contains NaN values.' ) + raise ValueError(msg) if ( 'Scale Factor' not in scaling_specification_df.columns or scaling_specification_df['Scale Factor'].isna().any() ): - raise ValueError( + msg = ( 'The `Scale Factor` column is missing ' 'from the scaling specification or contains NaN values.' 
) + raise ValueError(msg) # Add missing optional columns with NaN values optional_cols = ['Component', 'Location', 'Direction'] @@ -561,14 +558,14 @@ def consequence_scaling(self, scaling_specification: str) -> None: 'Direction': 'dir', 'Scale Factor': 'scaling', } - scaling_specification_df.rename(columns=name_map, inplace=True) + scaling_specification_df = scaling_specification_df.rename(columns=name_map) # Expand ranges in 'loc' and 'dir' - def _expand_range(col): + def _expand_range(col): # noqa: ANN001, ANN202 if pd.isna(col): return [col] if '--' in col: - start, end = [int(x) for x in col.split('--')] + start, end = (int(x) for x in col.split('--')) return [str(x) for x in range(start, end + 1)] return [col] @@ -597,11 +594,11 @@ def _apply_consequence_scaling( self, scaling_conditions: dict, scale_factor: float, + *, raise_missing: bool = True, ) -> None: """ - Applies a scale factor to selected columns of the loss - samples. + Apply a scale factor to selected loss sample columns. The scaling conditions are passed as a dictionary mapping level names with their required value for the condition to be @@ -627,6 +624,8 @@ def _apply_consequence_scaling( which case only the matching rows will be affected. scale_factor: float Scale factor to use. + raise_missing: bool + Raise an error if no rows are matching the given conditions. Raises ------ @@ -635,18 +634,17 @@ def _apply_consequence_scaling( `dv` key. """ - # make sure we won't apply the same factor to all DVs at once, # highly unlikely anyone would actually want to do this. if 'dv' not in scaling_conditions: - raise ValueError( + msg = ( 'The index of the `scaling_conditions` dictionary ' 'should contain a level named `dv` listing the ' 'relevant decision variable.' 
) + raise ValueError(msg) for model in self._loss_models: - # check if it's empty if model.sample is None: continue @@ -655,9 +653,10 @@ def _apply_consequence_scaling( # values exist yet) for name in scaling_conditions: if name not in model.sample.columns.names: - raise ValueError( + msg = ( f'`scaling_conditions` contains an unknown level: `{name}`.' ) + raise ValueError(msg) # apply scale factors base.multiply_factor_multiple_levels( @@ -669,10 +668,10 @@ def _apply_consequence_scaling( ) def save_sample( - self, filepath: str | None = None, save_units: bool = False + self, filepath: str | None = None, *, save_units: bool = False ) -> None | pd.DataFrame | tuple[pd.DataFrame, pd.Series]: """ - + . Saves the sample of the `ds_model`. @@ -682,7 +681,7 @@ def save_sample( The output of {loss model}.ds_model.save_sample. """ - self.log.warn( + self.log.warning( '`{loss model}.save_sample` is deprecated and will raise ' 'in future versions of pelicun. Please use ' '{loss model}.ds_model.save_sample instead.' @@ -691,12 +690,12 @@ def save_sample( def load_sample(self, filepath: str | pd.DataFrame) -> None: """ - + . Saves the sample of the `ds_model`. """ - self.log.warn( + self.log.warning( '`{loss model}.load_sample` is deprecated and will raise ' 'in future versions of pelicun. Please use ' '{loss model}.ds_model.load_sample instead.' @@ -704,16 +703,17 @@ def load_sample(self, filepath: str | pd.DataFrame) -> None: dv_units = self.ds_model.load_sample(filepath=filepath) self.dv_units = dv_units - def aggregate_losses( + def aggregate_losses( # noqa: C901 self, replacement_configuration: ( tuple[uq.RandomVariableRegistry, dict[str, float]] | None ) = None, loss_combination: dict | None = None, + *, future: bool = False, ) -> pd.DataFrame | tuple[pd.DataFrame, pd.DataFrame]: """ - Aggregates the losses produced by each component. + Aggregate the losses produced by each component. Parameters ---------- @@ -725,7 +725,7 @@ def aggregate_losses( thresholds. 
If the aggregated value for a decision variable (conditioned on no replacement) exceeds the threshold, then replacement is triggered. This can happen - for multuple decision variables at the same + for multiple decision variables at the same realization. The consequence keyword `replacement` is reserved to represent exclusive triggering of the replacement consequences, and other consequences are @@ -733,9 +733,9 @@ def aggregate_losses( triggered. When assigned to None, then `replacement` is still treated as an exclusive consequence (other consequences are set to zero when replacement is nonzero) - but it is not being additinally triggered by the + but it is not being additionally triggered by the exceedance of any thresholds. The aggregated loss sample - conains an additional column with information on whether + contains an additional column with information on whether replacement was already present or triggered by a threshold exceedance for each realization. loss_combination: dict, optional @@ -759,33 +759,30 @@ def aggregate_losses( This structure allows for the loss combination of M components. In this case the (`c1`, `c2`) tuple should contain M elements instead of two. + future: bool, optional + Defaults to False. When set to True, it enables the + updated return type. - Note - ---- + Notes + ----- Regardless of the value of the arguments, this method does not alter the state of the loss model, i.e., it does not modify the values of the `.sample` attributes. Returns ------- - tuple + dataframe or tuple Dataframe with the aggregated loss of each realization, and another boolean dataframe with information on which DV thresholds were exceeded in each realization, triggering replacement. If no thresholds are specified it only - contains False values. - - Raises - ------ - ValueError - When inputs are invalid. + contains False values. The second dataframe is only + returned with `future` set to True. 
""" - - # TODO - # When we start working on the documentation, simplify the - # docstring above and point the relevant detailed section in - # the documentation. + # TODO(JVM): When we start working on the documentation, + # simplify the docstring above and point the relevant detailed + # section in the documentation. # validate input if replacement_configuration is not None: @@ -807,7 +804,7 @@ def aggregate_losses( else: lf_sample = None - def _construct_columns(): + def _construct_columns() -> list[str]: columns = [ f'repair_{x.lower()}' for x in self.decision_variables if x != 'Time' ] @@ -818,9 +815,8 @@ def _construct_columns(): return columns if ds_sample is None and lf_sample is None: - self.log.msg("There are no losses.") - df_agg = pd.DataFrame(0.00, index=[0], columns=_construct_columns()) - return df_agg + self.log.msg('There are no losses.') + return pd.DataFrame(0.00, index=[0], columns=_construct_columns()) # # handle `replacement`, regardless of whether @@ -839,7 +835,7 @@ def _construct_columns(): column_levels = ['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'] combined_sample = self.sample sample = ( - combined_sample.groupby(by=column_levels, axis=1) + combined_sample.groupby(by=column_levels, axis=1) # type: ignore .sum() .sort_index(axis=1) ) @@ -864,7 +860,7 @@ def _construct_columns(): df_agg = self._aggregate_sample(sample, _construct_columns()) if not future: - self.log.warn( + self.log.warning( '`aggregate_losses` has been expanded to support the ' 'consideration of the exceedance of loss threshold ' 'values leading to asset replacement ' @@ -883,28 +879,32 @@ def _construct_columns(): def _validate_input_loss_combination(self, loss_combination: dict) -> None: for dv, combinations in loss_combination.items(): if dv not in self.decision_variables: - raise ValueError( + msg = ( f'`loss_combination` contains the key ' f'`{dv}` which is not found in the active ' f'decision variables. These are: ' f'{self.decision_variables}.' 
) + raise ValueError(msg) for components, array in combinations.items(): if not isinstance(components, tuple): - raise ValueError( + msg = ( f'Invalid type for components in loss combination ' f'for `{dv}`: {type(components)}. It should be a tuple.' ) + raise TypeError(msg) if not all(isinstance(c, str) for c in components): - raise ValueError( + msg = ( f'All elements of the components tuple in loss ' f'combination for `{dv}` should be strings.' ) + raise ValueError(msg) if not isinstance(array, np.ndarray): - raise ValueError( + msg = ( f'Invalid type for array in loss combination ' f'for `{dv}`: {type(array)}. It should be a numpy array.' ) + raise TypeError(msg) def _validate_input_replacement_thresholds( self, @@ -912,16 +912,17 @@ def _validate_input_replacement_thresholds( uq.RandomVariableRegistry, dict[str, float] ], ) -> None: - replacement_consequence_RV_reg, replacement_ratios = ( + replacement_consequence_rv_reg, replacement_ratios = ( replacement_configuration ) - if not isinstance(replacement_consequence_RV_reg, uq.RandomVariableRegistry): - raise TypeError( + if not isinstance(replacement_consequence_rv_reg, uq.RandomVariableRegistry): + msg = ( f'Invalid type for replacement consequence RV registry: ' - f'{type(replacement_consequence_RV_reg)}. It should be ' + f'{type(replacement_consequence_rv_reg)}. It should be ' f'uq.RandomVariableRegistry.' 
) - for key in replacement_consequence_RV_reg.RV: + raise TypeError(msg) + for key in replacement_consequence_rv_reg.RV: if key not in self.decision_variables: msg = ( f'`replacement_consequence_RV_reg` contains the key ' @@ -933,7 +934,7 @@ def _validate_input_replacement_thresholds( 'Loss/ReplacementThreshold/RaiseOnUnknownKeys' ): raise ValueError(msg) - self.log.warn(msg) + self.log.warning(msg) for key in replacement_ratios: if key not in self.decision_variables: @@ -947,23 +948,21 @@ def _validate_input_replacement_thresholds( 'Loss/ReplacementThreshold/RaiseOnUnknownKeys' ): raise ValueError(msg) - self.log.warn(msg) + self.log.warning(msg) # The replacement_consequence_RV_reg should contain an RV for # all active DVs, regardless of whether there is a replacement - # threshold for that DV, becauase when replacememnt is + # threshold for that DV, because when replacememnt is # triggered, we need to assign a consequence for all DVs. for key in self.decision_variables: - if key not in replacement_consequence_RV_reg.RV: - raise ValueError( - f'Missing replacement consequence RV ' f'for `{key}`.' - ) + if key not in replacement_consequence_rv_reg.RV: + msg = f'Missing replacement consequence RV ' f'for `{key}`.' + raise ValueError(msg) def _apply_loss_combinations( self, loss_combination: dict, sample: pd.DataFrame ) -> pd.DataFrame: """ - Performs non-additive loss combinations of specified - components. + Perform loss combinations of specified components. This function deconstructs the loss combination arrays, identifies the combinable components, and applies the @@ -973,14 +972,14 @@ def _apply_loss_combinations( Parameters ---------- - loss_combination : dict + loss_combination: dict A dictionary containing the loss combination information. The structure is nested dictionaries where the outer keys are decision variables, inner keys are components to combine, and the values are array objects representing the combination data. 
- sample : pandas.DataFrame + sample: pandas.DataFrame The input DataFrame containing the sample data. The columns are assumed to be a MultiIndex with at least the levels (decision_variable, loss_id, component_id, @@ -992,7 +991,6 @@ def _apply_loss_combinations( A new DataFrame with the combined loss data. """ - # deconstruct combination arrays to extract the input domains loss_combination_converted = self._deconstruct_loss_combination_arrays( loss_combination @@ -1015,18 +1013,17 @@ def _apply_loss_combinations( dsample, loss_combination_converted, dcsample ) # and the remaining - for col in dsample: - dcsample[col] = dsample[col] + for col, val in dsample.items(): + dcsample[col] = val # noqa: PERF403 # turn into a dataframe - sample = pd.DataFrame(dcsample).rename_axis(columns=sample.columns.names) - return sample + return pd.DataFrame(dcsample).rename_axis(columns=sample.columns.names) def _loss_combination_add_combinable( self, dsample: dict, loss_combination_converted: dict, dcsample: dict ) -> None: """ - Adds combinable loss data. + Add combinable loss data. This function identifies groups of `loc`-`dir`-`uid` that can be combined for each decision variable and computes the @@ -1037,20 +1034,20 @@ def _loss_combination_add_combinable( Parameters ---------- - dsample : dict + dsample: dict A dictionary representing the loss sample data, where keys are tuples of the form (decision_variable, loss_id, component_id, location, direction, uid) and values are the corresponding data arrays. - loss_combination_converted : dict + loss_combination_converted: dict A dictionary containing loss combination data. The structure is nested dictionaries where the outer keys are decision variables, inner keys are components to combine, and the values are tuples of combination parameters (domains and reference values). 
- dcsample : dict + dcsample: dict A dictionary to store the combined loss data, where keys are tuples of the form (decision_variable, 'combination', combined_component_string, location, direction, uid) and @@ -1118,10 +1115,9 @@ def _loss_combination_add_combinable( for col in cols: dsample.pop(col) - def _identify_potential_groups(self, dsample: dict) -> dict: + def _identify_potential_groups(self, dsample: dict) -> dict: # noqa: PLR6301 """ - Identifies potential groups of `loc`-`dir`-`uid` for each - decision variable. + Identify potential groups of `loc`-`dir`-`uid` for each DV. This function identifies all combinations of `loc`-`dir`-`uid` that can be grouped for each decision variable based on the @@ -1129,7 +1125,7 @@ def _identify_potential_groups(self, dsample: dict) -> dict: Parameters ---------- - dsample : iterable + dsample: iterable An iterable where each containing tuple contains information about the components and their attributes. The expected format of each tuple is (decision_variable, @@ -1156,9 +1152,9 @@ def _identify_potential_groups(self, dsample: dict) -> dict: inverted[c_dv][frozenset(component_set)].append(loc_dir_uid) return inverted - def _map_component_ids_to_loss_ids(self, dsample: dict) -> dict: + def _map_component_ids_to_loss_ids(self, dsample: dict) -> dict: # noqa: PLR6301 """ - Maps component IDs to loss IDs. + Map component IDs to loss IDs. This function maps components to losses based on the loss sample's columns. It assumes that multiple component IDs can @@ -1167,7 +1163,7 @@ def _map_component_ids_to_loss_ids(self, dsample: dict) -> dict: Parameters ---------- - dsample : tuple dictionary keys + dsample: tuple dictionary keys Each tuple contains information about the components and corresponding losses. 
@@ -1185,7 +1181,7 @@ def _map_component_ids_to_loss_ids(self, dsample: dict) -> dict: dmg_to_loss[c_dmg] = c_loss return dmg_to_loss - def _deconstruct_loss_combination_arrays(self, loss_combination: dict) -> dict: + def _deconstruct_loss_combination_arrays(self, loss_combination: dict) -> dict: # noqa: PLR6301 """ Deconstruct loss combination arrays. @@ -1196,7 +1192,7 @@ def _deconstruct_loss_combination_arrays(self, loss_combination: dict) -> dict: Parameters ---------- - loss_combination : dict + loss_combination: dict A dictionary where keys are decision variables and values are another dictionary. The inner dictionary has keys as components to combine and values as numpy array @@ -1233,28 +1229,39 @@ def _deconstruct_loss_combination_arrays(self, loss_combination: dict) -> dict: def _aggregate_sample(self, sample: pd.DataFrame, columns: list) -> pd.DataFrame: """ - Sums up component losses. + Sum up component losses. + + Returns + ------- + pd.DataFrame + Dataframe with the aggregated losses. 
""" df_agg = pd.DataFrame(index=sample.index, columns=columns) # group results by DV type and location aggregated = sample.groupby( - level=['dv', 'loc'], axis=1 # type: ignore + level=['dv', 'loc'], + axis=1, # type: ignore ).sum() for decision_variable in self.decision_variables: - # Time - if decision_variable == 'Time' and 'Time' in aggregated.columns: + if ( + decision_variable == 'Time' + and 'Time' in aggregated.columns.get_level_values('dv') + ): df_agg['repair_time-sequential'] = aggregated['Time'].sum(axis=1) df_agg['repair_time-parallel'] = aggregated['Time'].max(axis=1) - elif decision_variable == 'Time' and 'Time' not in aggregated.columns: + elif ( + decision_variable == 'Time' + and 'Time' not in aggregated.columns.get_level_values('dv') + ): df_agg = df_agg.drop( ['repair_time-parallel', 'repair_time-sequential'], axis=1 ) # All other - elif decision_variable in aggregated.columns: + elif decision_variable in aggregated.columns.get_level_values('dv'): df_agg[f'repair_{decision_variable.lower()}'] = aggregated[ decision_variable ].sum(axis=1) @@ -1268,6 +1275,7 @@ def _aggregate_sample(self, sample: pd.DataFrame, columns: list) -> pd.DataFrame .replace('-parallel', '') for x in df_agg.columns.get_level_values(0) ] + assert self.dv_units is not None column_units = [self.dv_units[x.title()] for x in column_measures] dv_units = pd.Series(column_units, index=df_agg.columns, name='Units') res = file_io.save_to_csv( @@ -1280,66 +1288,67 @@ def _aggregate_sample(self, sample: pd.DataFrame, columns: list) -> pd.DataFrame ) assert isinstance(res, pd.DataFrame) df_agg = res - df_agg.drop("Units", inplace=True) + df_agg = df_agg.drop('Units') df_agg = df_agg.astype(float) df_agg_mi = base.convert_to_MultiIndex(df_agg, axis=1) assert isinstance(df_agg_mi, pd.DataFrame) df_agg = df_agg_mi - df_agg.sort_index(axis=1, inplace=True) + df_agg = df_agg.sort_index(axis=1) df_agg = df_agg.reset_index(drop=True) assert isinstance(df_agg, pd.DataFrame) return df_agg - def 
_apply_replacement_thresholds( + def _apply_replacement_thresholds( # noqa: PLR6301 self, sample: pd.DataFrame, replacement_configuration: ( tuple[uq.RandomVariableRegistry, dict[str, float]] | None ), ) -> tuple[pd.DataFrame, pd.DataFrame]: - # If there is no `replacement_configuration`, simply return. if replacement_configuration is None: # `exceedance_bool_df` is empty in this case. exceedance_bool_df = pd.DataFrame(index=sample.index, dtype=bool) return sample, exceedance_bool_df - replacement_consequence_RV_reg, replacement_ratios = ( + replacement_consequence_rv_reg, replacement_ratios = ( replacement_configuration ) exceedance_bool_df = pd.DataFrame( # type: ignore - False, + data=False, index=sample.index, - columns=replacement_consequence_RV_reg.RV.keys(), + columns=replacement_consequence_rv_reg.RV.keys(), dtype=bool, ) # Sample replacement consequences from the registry - replacement_consequence_RV_reg.generate_sample(len(sample), 'MonteCarlo') + replacement_consequence_rv_reg.generate_sample(len(sample), 'MonteCarlo') - sample_dvs = replacement_consequence_RV_reg.RV.keys() + sample_dvs = replacement_consequence_rv_reg.RV.keys() for sample_dv in sample_dvs: sub_sample = sample.loc[:, sample_dv] if 'replacement' in sub_sample.columns.get_level_values('loss'): # If `replacement` already exists as a consequence, # determine the realizations where it is non-zero. 
no_replacement_mask = ( - ~(sub_sample['replacement'] > 0.00).any(axis=1).values + ~(sub_sample['replacement'] > 0.00).any(axis=1).to_numpy() ) no_replacement_columns = ( sub_sample.columns.get_level_values('loss') != 'replacement' ) else: # Otherwise there is no row where we already have replacement - no_replacement_mask = np.full(len(sub_sample), True) - no_replacement_columns = np.full(len(sub_sample.columns), True) + no_replacement_mask = np.full(len(sub_sample), fill_value=True) + no_replacement_columns = np.full( + len(sub_sample.columns), fill_value=True + ) # Get the sum to compare with the thresholds consequence_sum_given_no_replacement = sub_sample.iloc[ # type: ignore no_replacement_mask, no_replacement_columns ].sum(axis=1) if not consequence_sum_given_no_replacement.empty: - consequence_values = replacement_consequence_RV_reg.RV[ + consequence_values = replacement_consequence_rv_reg.RV[ sample_dv ].sample assert consequence_values is not None @@ -1355,7 +1364,8 @@ def _apply_replacement_thresholds( ) else: exceedance_mask = pd.Series( - np.full(len(sub_sample), False), index=sub_sample.index + np.full(len(sub_sample), fill_value=False), + index=sub_sample.index, ) # Monitor triggering of replacement @@ -1365,7 +1375,7 @@ def _apply_replacement_thresholds( # exceeded. 
exceedance_realizations = exceedance_bool_df.any(axis=1) # Assign replacement consequences: needs to include all DVs - for other_dv in replacement_consequence_RV_reg.RV.keys(): + for other_dv in replacement_consequence_rv_reg.RV: col = ( other_dv, 'replacement', @@ -1379,7 +1389,7 @@ def _apply_replacement_thresholds( sample[col] = 0.00 sample = sample.sort_index(axis=1) # Assign replacement consequences - other_sample = replacement_consequence_RV_reg.RV[other_dv].sample + other_sample = replacement_consequence_rv_reg.RV[other_dv].sample assert other_sample is not None sample.loc[exceedance_realizations, col] = other_sample[ exceedance_realizations @@ -1393,16 +1403,17 @@ def _apply_replacement_thresholds( return sample, exceedance_bool_df - def _make_replacement_exclusive( + def _make_replacement_exclusive( # noqa: PLR6301 self, ds_sample: pd.DataFrame, lf_sample: pd.DataFrame | None ) -> None: """ + Make the replacement consequence exclusive. + If `replacement` columns exist in `ds_sample`, this method treats all nonzero loss values driven by `replacement` as exclusive and zeroes-out the loss values of all other columns for the applicable rows. """ - # rows where replacement is non-zero replacement_rows: list = [] @@ -1414,7 +1425,7 @@ def _make_replacement_exclusive( if not rows_df.empty: replacement_rows = ( - np.argwhere(np.any(rows_df.values > 0.0, axis=1)) + np.argwhere(np.any(rows_df.to_numpy() > 0.0, axis=1)) .reshape(-1) .tolist() ) @@ -1423,11 +1434,11 @@ def _make_replacement_exclusive( lf_sample.iloc[replacement_rows, :] = 0.00 @property - def _loss_models(self): + def _loss_models(self) -> tuple[RepairModel_DS, RepairModel_LF]: return (self.ds_model, self.lf_model) @property - def _loss_map(self): + def _loss_map(self) -> pd.DataFrame | None: """ Returns the loss map. @@ -1439,12 +1450,12 @@ def _loss_map(self): """ # Retrieve the DataFrame from one of the included loss models. # We use a single loss map for all. 
- return self.ds_model._loss_map + return self.ds_model.loss_map @_loss_map.setter - def _loss_map(self, loss_map): + def _loss_map(self, loss_map: pd.DataFrame) -> None: """ - Sets the loss map. + Set the loss map. Parameters ---------- @@ -1455,10 +1466,10 @@ def _loss_map(self, loss_map): # Add the DataFrame to the included loss models. # We use a single loss map for all. for model in self._loss_models: - model._loss_map = loss_map + model.loss_map = loss_map @property - def _missing(self): + def _missing(self) -> set[tuple[str, str]]: """ Returns the missing components. @@ -1469,12 +1480,12 @@ def _missing(self): definitions. """ - return self.ds_model._missing + return self.ds_model.missing @_missing.setter - def _missing(self, missing): + def _missing(self, missing: set[tuple[str, str]]) -> None: """ - Assigns missing parameter definitions to the loss models. + Assign missing parameter definitions to the loss models. Parameters ---------- @@ -1484,19 +1495,16 @@ def _missing(self, missing): """ for model in self._loss_models: - model._missing = missing - - def _ensure_loss_parameter_availability(self): - """ - Makes sure that all components have loss parameters. - - """ + model.missing = missing + def _ensure_loss_parameter_availability(self) -> None: + """Make sure that all components have loss parameters.""" # # Repair Models (currently the only type supported) # required = [] + assert self._loss_map is not None for dv in self.decision_variables: required.extend( [(component, dv) for component in self._loss_map['Repair']] @@ -1504,36 +1512,33 @@ def _ensure_loss_parameter_availability(self): missing_set = set(required) for model in (self.ds_model, self.lf_model): - missing_set = missing_set - model._get_available() + missing_set -= model.get_available() if missing_set: - self.log.warn( - f"The loss model does not provide " - f"loss information for the following component(s) " - f"in the asset model: {sorted(list(missing_set))}." 
+ self.log.warning( + f'The loss model does not provide ' + f'loss information for the following component(s) ' + f'in the asset model: {sorted(missing_set)}.' ) self._missing = missing_set class RepairModel_Base(PelicunModel): - """ - Base class for loss models - - """ + """Base class for loss models.""" __slots__ = [ - 'loss_params', - 'sample', 'consequence', 'decision_variables', - '_loss_map', - '_missing', + 'loss_map', + 'loss_params', + 'missing', + 'sample', ] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: """ - Initializes RepairModel_Base objects. + Initialize RepairModel_Base objects. Parameters ---------- @@ -1546,15 +1551,16 @@ def __init__(self, assessment: AssessmentBase): self.loss_params: pd.DataFrame | None = None self.sample: pd.DataFrame | None = None self.consequence = 'Repair' - self.decision_variables: tuple[str, ...] = tuple() - self._loss_map: pd.DataFrame | None = None - self._missing: set = set() + self.decision_variables: tuple[str, ...] = () + self.loss_map: pd.DataFrame | None = None + self.missing: set = set() - def _load_model_parameters(self, data: pd.DataFrame) -> None: + def load_model_parameters(self, data: pd.DataFrame) -> None: """ - Load model parameters from a DataFrame, extending those - already available. Parameters already defined take precedence, - i.e. redefinitions of parameters are ignored. + Load model parameters from a DataFrame. + + Extends those already available. Parameters already defined + take precedence, i.e. redefinitions of parameters are ignored. Parameters ---------- @@ -1562,7 +1568,6 @@ def _load_model_parameters(self, data: pd.DataFrame) -> None: Data with loss model information. 
""" - data.index.names = ['Loss Driver', 'Decision Variable'] if self.loss_params is not None: @@ -1577,14 +1582,15 @@ def _load_model_parameters(self, data: pd.DataFrame) -> None: self.loss_params = data - def _drop_unused_loss_parameters(self, loss_map: pd.DataFrame) -> None: + def drop_unused_loss_parameters(self, loss_map: pd.DataFrame) -> None: """ - Removes loss parameter definitions for component IDs not - present in the loss map. + Remove loss parameter definitions. + + Applicable to component IDs not present in the loss map. Parameters ---------- - loss_map_path: str or pd.DataFrame or None + loss_map: str or pd.DataFrame or None Path to a csv file or DataFrame object that maps components IDs to their loss parameter definitions. Components in the asset model that are omitted from the @@ -1592,15 +1598,14 @@ def _drop_unused_loss_parameters(self, loss_map: pd.DataFrame) -> None: """ - if self.loss_params is None: return # if 'BldgRepair' in loss_map.columns: loss_map['Repair'] = loss_map['BldgRepair'] - loss_map.drop('BldgRepair', axis=1, inplace=True) - self.log.warn( + loss_map = loss_map.drop('BldgRepair', axis=1) + self.log.warning( '`BldgRepair` as a loss map column name is ' 'deprecated and will be dropped in ' 'future versions of pelicun. Please use `Repair` instead.' @@ -1612,8 +1617,10 @@ def _drop_unused_loss_parameters(self, loss_map: pd.DataFrame) -> None: cmp_mask = self.loss_params.index.get_level_values(0).isin(cmp_set, level=0) self.loss_params = self.loss_params.iloc[cmp_mask, :] - def _remove_incomplete_components(self) -> None: + def remove_incomplete_components(self) -> None: """ + Remove incomplete components. + Removes components that have incomplete loss model definitions from the loss model parameters. 
@@ -1625,48 +1632,51 @@ def _remove_incomplete_components(self) -> None: return cmp_incomplete_idx = self.loss_params.loc[ - self.loss_params[('Incomplete', '')] == 1 + self.loss_params['Incomplete', ''] == 1 ].index - self.loss_params.drop(cmp_incomplete_idx, inplace=True) + self.loss_params = self.loss_params.drop(cmp_incomplete_idx) if len(cmp_incomplete_idx) > 0: self.log.msg( - f"\n" - f"WARNING: Loss model information is incomplete for " - f"the following component(s) " - f"{cmp_incomplete_idx.to_list()}. They " - f"were removed from the analysis." - f"\n", + f'\n' + f'WARNING: Loss model information is incomplete for ' + f'the following component(s) ' + f'{cmp_incomplete_idx.to_list()}. They ' + f'were removed from the analysis.' + f'\n', prepend_timestamp=False, ) - def _get_available(self) -> set: + def get_available(self) -> set: """ - Get a set of components for which loss parameters are - available. + Get a set of components with available loss parameters. + + Returns + ------- + set + Set of components with available loss parameters. """ if self.loss_params is not None: cmp_list = self.loss_params.index.to_list() return set(cmp_list) return set() + @abstractmethod + def convert_loss_parameter_units(self) -> None: + """Convert previously loaded loss parameters to base units.""" -class RepairModel_DS(RepairModel_Base): - """ - Manages repair consequences driven by components that are modeled - with discrete Damage States (DS) - """ +class RepairModel_DS(RepairModel_Base): + """Repair consequences for components with damage states.""" __slots__ = ['RV_reg'] def save_sample( - self, filepath: str | None = None, save_units: bool = False + self, filepath: str | None = None, *, save_units: bool = False ) -> None | pd.DataFrame | tuple[pd.DataFrame, pd.Series]: """ - Saves the loss sample to a CSV file or returns it as a - DataFrame with optional units. + Save or return the loss sample. 
This method handles the storage of a sample of loss estimates, which can either be saved directly to a file or returned as a @@ -1678,11 +1688,11 @@ def save_sample( Parameters ---------- - filepath : str, optional + filepath: str, optional The path to the file where the loss sample should be saved. If not provided, the sample is not saved to disk but returned. - save_units : bool, default: False + save_units: bool, default: False Indicates whether to include a row with unit information in the returned DataFrame. This parameter is ignored if a file path is provided. @@ -1697,21 +1707,14 @@ def save_sample( * Optionally, a Series containing the units for each column if `save_units` is True. - Raises - ------ - IOError - Raises an IOError if there is an issue saving the file to - the specified `filepath`. - """ - self.log.div() if filepath is not None: self.log.msg('Saving loss sample...') assert self.sample is not None assert self.loss_params is not None - cmp_units = self.loss_params[('DV', 'Unit')] + cmp_units = self.loss_params['DV', 'Unit'] dv_units = pd.Series(index=self.sample.columns, name='Units', dtype='object') valid_dv_types = dv_units.index.unique(level=0) @@ -1719,11 +1722,11 @@ def save_sample( for cmp_id, dv_type in cmp_units.index: if (dv_type in valid_dv_types) and (cmp_id in valid_cmp_ids): - dv_units.loc[(dv_type, cmp_id)] = cmp_units.at[(cmp_id, dv_type)] + dv_units.loc[dv_type, cmp_id] = cmp_units.loc[cmp_id, dv_type] res = file_io.save_to_csv( self.sample, - filepath, + Path(filepath) if filepath is not None else None, units=dv_units, unit_conversion_factors=self._asmnt.unit_conversion_factors, use_simpleindex=(filepath is not None), @@ -1735,8 +1738,8 @@ def save_sample( assert isinstance(res, pd.DataFrame) - units = res.loc["Units"] - res.drop("Units", inplace=True) + units = res.loc['Units'] + res = res.drop('Units') res = res.astype(float) assert isinstance(res, pd.DataFrame) assert isinstance(units, pd.Series) @@ -1778,6 +1781,8 @@ def 
load_sample(self, filepath: str | pd.DataFrame) -> dict[str, str]: log=self._asmnt.log, return_units=True, ) + assert isinstance(sample, pd.DataFrame) + assert isinstance(units, pd.Series) units.index.names = names # Obtain the DV units # Note: we don't need to check for consistency (all rows @@ -1792,16 +1797,19 @@ def load_sample(self, filepath: str | pd.DataFrame) -> dict[str, str]: # check if `uid` level was provided num_levels = len(sample.columns.names) - if num_levels == 6: + num_levels_without_uid = 6 + num_levels_with_uid = num_levels_without_uid + 1 + if num_levels == num_levels_without_uid: sample.columns.names = names[:-1] sample = base.dedupe_index(sample.T).T - elif num_levels == 7: + elif num_levels == num_levels_with_uid: sample.columns.names = names else: - raise ValueError( + msg = ( f'Invalid loss sample: Column MultiIndex ' f'has an unexpected length: {num_levels}' ) + raise ValueError(msg) self.sample = sample @@ -1809,10 +1817,9 @@ def load_sample(self, filepath: str | pd.DataFrame) -> dict[str, str]: return dv_units - def _calculate(self, dmg_quantities: pd.DataFrame) -> None: + def calculate(self, dmg_quantities: pd.DataFrame) -> None: # noqa: C901 """ - Calculate the damage consequences of each damage state-driven - performance group in the asset. + Calculate damage consequences. Parameters ---------- @@ -1822,14 +1829,8 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: and direction. You can use the prepare_dmg_quantities method in the DamageModel to get such a DF. - Raises - ------ - ValueError - When any Loss Driver is not recognized. 
- """ - - assert self._loss_map is not None + assert self.loss_map is not None sample_size = len(dmg_quantities) @@ -1837,16 +1838,16 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: if set(dmg_quantities.columns.get_level_values('ds')) == {'0'}: self.sample = None self.log.msg( - "There is no damage---DV sample is set to None.", + 'There is no damage---DV sample is set to None.', prepend_timestamp=False, ) return # calculate the quantities for economies of scale - self.log.msg("\nAggregating damage quantities...", prepend_timestamp=False) + self.log.msg('\nAggregating damage quantities...', prepend_timestamp=False) - if self._asmnt.options.eco_scale["AcrossFloors"]: - if self._asmnt.options.eco_scale["AcrossDamageStates"]: + if self._asmnt.options.eco_scale['AcrossFloors']: + if self._asmnt.options.eco_scale['AcrossDamageStates']: eco_levels = [0] eco_columns = ['cmp'] @@ -1854,7 +1855,7 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: eco_levels = [0, 4] eco_columns = ['cmp', 'ds'] - elif self._asmnt.options.eco_scale["AcrossDamageStates"]: + elif self._asmnt.options.eco_scale['AcrossDamageStates']: eco_levels = [0, 1] eco_columns = ['cmp', 'loc'] @@ -1867,32 +1868,32 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: assert eco_qnt.columns.names == eco_columns self.log.msg( - "Successfully aggregated damage quantities.", prepend_timestamp=False + 'Successfully aggregated damage quantities.', prepend_timestamp=False ) # apply the median functions, if needed, to get median consequences for # each realization self.log.msg( - "\nCalculating the median repair consequences...", + '\nCalculating the median repair consequences...', prepend_timestamp=False, ) medians = self._calc_median_consequence(eco_qnt) self.log.msg( - "Successfully determined median repair consequences.", + 'Successfully determined median repair consequences.', prepend_timestamp=False, ) # combine the median consequences with the samples of deviation from the 
# median to get the consequence realizations. self.log.msg( - "\nConsidering deviations from the median values to obtain " - "random DV sample..." + '\nConsidering deviations from the median values to obtain ' + 'random DV sample...' ) self.log.msg( - "Preparing random variables for repair consequences...", + 'Preparing random variables for repair consequences...', prepend_timestamp=False, ) self.RV_reg = self._create_DV_RVs(dmg_quantities.columns) # type: ignore @@ -1907,33 +1908,31 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: pd.DataFrame(self.RV_reg.RV_sample), axis=1 ) std_sample.columns.names = ['dv', 'cmp', 'ds', 'loc', 'dir', 'uid'] - std_sample.sort_index(axis=1, inplace=True) + std_sample = std_sample.sort_index(axis=1) else: std_sample = None self.log.msg( - f"\nSuccessfully generated {sample_size} realizations of " - "deviation from the median consequences.", + f'\nSuccessfully generated {sample_size} realizations of ' + 'deviation from the median consequences.', prepend_timestamp=False, ) res_list = [] key_list: list[tuple[Any, ...]] = [] - dmg_quantities.columns = ( - dmg_quantities.columns.reorder_levels( # type: ignore - ['cmp', 'ds', 'loc', 'dir', 'uid'] - ) + dmg_quantities.columns = dmg_quantities.columns.reorder_levels( # type: ignore + ['cmp', 'ds', 'loc', 'dir', 'uid'] ) - dmg_quantities.sort_index(axis=1, inplace=True) + dmg_quantities = dmg_quantities.sort_index(axis=1) if std_sample is not None: std_dvs = std_sample.columns.unique(level=0) else: std_dvs = [] - for decision_variable in self.decision_variables: + for decision_variable in self.decision_variables: # noqa: PLR1702 if decision_variable in std_dvs: assert isinstance(std_sample, pd.DataFrame) prob_cmp_list = std_sample[decision_variable].columns.unique(level=0) @@ -1946,7 +1945,7 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: continue for component in medians[decision_variable].columns.unique(level=0): # check if there is damage in the component - 
consequence = self._loss_map.at[component, 'Repair'] + consequence = self.loss_map.loc[component, 'Repair'] if component not in dmg_quantities.columns.get_level_values('cmp'): continue @@ -1966,11 +1965,11 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: ].columns.unique(level=0) ): if ( - self._asmnt.options.eco_scale["AcrossFloors"] is True + self._asmnt.options.eco_scale['AcrossFloors'] is True ) and (loc_id > 0): break - if self._asmnt.options.eco_scale["AcrossFloors"] is True: + if self._asmnt.options.eco_scale['AcrossFloors'] is True: median_i = medians[decision_variable].loc[ :, (component, ds) ] @@ -2020,21 +2019,21 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: loc_list.append(loc) - if self._asmnt.options.eco_scale["AcrossFloors"] is True: + if self._asmnt.options.eco_scale['AcrossFloors'] is True: ds_list += [ ds, ] else: ds_list += [(ds, loc) for loc in loc_list] - if self._asmnt.options.eco_scale["AcrossFloors"] is True: + if self._asmnt.options.eco_scale['AcrossFloors'] is True: cmp_list += [(consequence, component, ds) for ds in ds_list] else: cmp_list += [ (consequence, component, ds, loc) for ds, loc in ds_list ] - if self._asmnt.options.eco_scale["AcrossFloors"] is True: + if self._asmnt.options.eco_scale['AcrossFloors'] is True: key_list += [ (decision_variable, loss_cmp_i, dmg_cmp_i, ds) for loss_cmp_i, dmg_cmp_i, ds in cmp_list @@ -2046,22 +2045,19 @@ def _calculate(self, dmg_quantities: pd.DataFrame) -> None: ] lvl_names = ['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid'] - DV_sample = pd.concat(res_list, axis=1, keys=key_list, names=lvl_names) - - DV_sample = DV_sample.fillna(0).convert_dtypes() + dv_sample = pd.concat(res_list, axis=1, keys=key_list, names=lvl_names) - self.log.msg("Successfully obtained DV sample.", prepend_timestamp=False) - self.sample = DV_sample + dv_sample = dv_sample.fillna(0).convert_dtypes() - def _convert_loss_parameter_units(self) -> None: - """ - Converts previously loaded loss 
parameters to base units. + self.log.msg('Successfully obtained DV sample.', prepend_timestamp=False) + self.sample = dv_sample - """ + def convert_loss_parameter_units(self) -> None: + """Convert previously loaded loss parameters to base units.""" if self.loss_params is None: return - units = self.loss_params[('DV', 'Unit')] - arg_units = self.loss_params[('Quantity', 'Unit')] + units = self.loss_params['DV', 'Unit'] + arg_units = self.loss_params['Quantity', 'Unit'] for column in self.loss_params.columns.unique(level=0): if not column.startswith('DS'): continue @@ -2069,11 +2065,13 @@ def _convert_loss_parameter_units(self) -> None: assert isinstance(params, pd.DataFrame) self.loss_params.loc[:, column] = self._convert_marginal_params( params, units, arg_units - ).values + ).to_numpy() - def _drop_unused_damage_states(self) -> None: + def drop_unused_damage_states(self) -> None: """ - Removes columns from the loss model parameters corresponding + Remove unused columns. + + Remove columns from the loss model parameters corresponding to unused damage states. """ @@ -2098,14 +2096,16 @@ def _drop_unused_damage_states(self) -> None: # but the way it's written now does what we want in # each case. ): - ds_to_drop.append(damage_state) + ds_to_drop.append(damage_state) # noqa: PERF401 - self.loss_params.drop(columns=ds_to_drop, level=0, inplace=True) + self.loss_params = self.loss_params.drop(columns=ds_to_drop, level=0) - def _create_DV_RVs( + def _create_DV_RVs( # noqa: N802, C901 self, cases: pd.MultiIndex ) -> uq.RandomVariableRegistry | None: """ + Prepare the random variables. + Prepare the random variables associated with decision variables, such as repair cost and time. @@ -2123,14 +2123,7 @@ def _create_DV_RVs( random variables are generated (due to missing parameters or conditions), returns None. - Raises - ------ - ValueError - If an unrecognized loss driver type is encountered, - indicating a configuration or data input error. 
- """ - # Convert the MultiIndex to a DataFrame case_df = pd.DataFrame(index=cases).reset_index() # maps `cmp` to array of damage states @@ -2151,14 +2144,13 @@ def _create_DV_RVs( ) damaged_components = set(cases.get_level_values('cmp')) - RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) rv_count = 0 # for each component in the loss map - assert isinstance(self._loss_map, pd.DataFrame) - for component, consequence in self._loss_map['Repair'].items(): - + assert isinstance(self.loss_map, pd.DataFrame) + for component, consequence in self.loss_map['Repair'].items(): # if that component does not have realized damage states, # skip it (e.g., this can happen when there is only # `collapse`). @@ -2167,11 +2159,10 @@ def _create_DV_RVs( # for each DV for decision_variable in self.decision_variables: - # If loss parameters are missing for that consequence, # don't estimate losses for it. A warning has already # been issued for what is missing. 
- if (consequence, decision_variable) in self._missing: + if (consequence, decision_variable) in self.missing: continue # If loss parameters are missing for that consequence, @@ -2189,7 +2180,6 @@ def _create_DV_RVs( ) for ds in damage_states[component]: - if ds == '0': continue @@ -2209,11 +2199,11 @@ def _create_DV_RVs( if isinstance(ds_theta[0], str) and '|' in ds_theta[0]: ds_theta[0] = 1.0 - loc_dir_uid = loc_dir_uids[(component, ds)] + loc_dir_uid = loc_dir_uids[component, ds] for loc, direction, uid in loc_dir_uid: # assign RVs - RV_reg.add_RV( + rv_reg.add_RV( uq.rv_class_map(ds_family)( # type: ignore name=( f'{decision_variable}-{component}-' @@ -2228,7 +2218,7 @@ def _create_DV_RVs( # assign Time-Cost correlation whenever applicable rho = self._asmnt.options.rho_cost_time if rho != 0.0: - for rv_tag in RV_reg.RV: + for rv_tag in rv_reg.RV: if not rv_tag.startswith('Cost'): continue component = rv_tag.split('-')[1] @@ -2237,26 +2227,28 @@ def _create_DV_RVs( direction = rv_tag.split('-')[4] uid = rv_tag.split('-')[5] time_rv_tag = rv_tag.replace('Cost', 'Time') - if time_rv_tag in RV_reg.RV: - RV_reg.add_RV_set( + if time_rv_tag in rv_reg.RV: + rv_reg.add_RV_set( uq.RandomVariableSet( f'DV-{component}-{ds}-{loc}-{direction}-{uid}_set', - list(RV_reg.RVs([rv_tag, time_rv_tag]).values()), + list(rv_reg.RVs([rv_tag, time_rv_tag]).values()), np.array([[1.0, rho], [rho, 1.0]]), ) ) self.log.msg( - f"\n{rv_count} random variables created.", prepend_timestamp=False + f'\n{rv_count} random variables created.', prepend_timestamp=False ) if rv_count > 0: - return RV_reg + return rv_reg return None - def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: + def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: # noqa: C901 """ - Calculates the median repair consequences for each loss + Calculate median reiapr consequences. 
+ + Calculate the median repair consequences for each loss component based on its quantity realizations and the associated loss parameters. @@ -2290,18 +2282,17 @@ def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: If any loss driver types or distribution types are not recognized, or if the parameters are incomplete or unsupported. - """ + """ medians = {} for decision_variable in self.decision_variables: cmp_list = [] median_list = [] - assert self._loss_map is not None - for loss_cmp_id, loss_cmp_name in self._loss_map['Repair'].items(): - - if (loss_cmp_name, decision_variable) in self._missing: + assert self.loss_map is not None + for loss_cmp_id, loss_cmp_name in self.loss_map['Repair'].items(): + if (loss_cmp_name, decision_variable) in self.missing: continue if loss_cmp_id not in eco_qnt.columns.get_level_values(0).unique(): @@ -2320,31 +2311,32 @@ def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: if ds_id == '0': continue - loss_params_DS = self.loss_params.loc[ + loss_params_ds = self.loss_params.loc[ (loss_cmp_name, decision_variable), ds ] # check if theta_0 is defined - theta_0 = loss_params_DS.get('Theta_0', np.nan) + theta_0 = loss_params_ds.get('Theta_0', np.nan) if pd.isna(theta_0): continue # check if the distribution type is supported - family = loss_params_DS.get('Family', np.nan) + family = loss_params_ds.get('Family', np.nan) if (not pd.isna(family)) and ( - family not in ['normal', 'lognormal', 'deterministic'] + family not in {'normal', 'lognormal', 'deterministic'} ): - raise ValueError( - f"Loss Distribution of type {family} " f"not supported." + msg = ( + f'Loss Distribution of type {family} ' f'not supported.' 
) + raise ValueError(msg) # If theta_0 is a scalar try: theta_0 = float(theta_0) - if pd.isna(loss_params_DS.get('Family', np.nan)): + if pd.isna(loss_params_ds.get('Family', np.nan)): # if theta_0 is constant, then use it directly f_median = _prep_constant_median_DV(theta_0) @@ -2423,15 +2415,11 @@ def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: class RepairModel_LF(RepairModel_Base): - """ - Manages repair consequences driven by components that are modeled - with Loss Functions (LF) - - """ + """Repair consequences for components with loss functions.""" __slots__ = [] - def _calculate( + def calculate( self, demand_sample: pd.DataFrame, cmp_sample: dict, @@ -2440,30 +2428,36 @@ def _calculate( nondirectional_multipliers: dict, ) -> None: """ - Calculate the repair consequences of each loss function-driven - component block in the asset. + Calculate repair consequences. Parameters ---------- demand_sample: pd.DataFrame The sample of the demand model to be used for the inputs of the loss functions. + cmp_sample: dict + Dict mapping each `cmp`-`loc`-`dir`-`uid` to the component + quantity realizations in the asset model in the form of + pd.Series objects. + cmp_marginal_params: pd.DataFrame + Dataframe containing component marginal distribution + parameters. + demand_offset: dict + Dictionary specifying the demand offset. + nondirectional_multipliers: dict + Dictionary specifying the non directional multipliers used + to combine the directional demands. - Raises - ------ - ValueError - When any Loss Driver is not recognized. 
""" - if self.loss_params is None: - return None + return - assert self._loss_map is not None - loss_map = self._loss_map['Repair'].to_dict() + assert self.loss_map is not None + loss_map = self.loss_map['Repair'].to_dict() sample_size = len(demand_sample) - # TODO: this can be taken out and simply passed as blocks in + # TODO(JVM): this can be taken out and simply passed as blocks in # the arguments, and cast to a dict in here. Index can be # obtained from there. index = [ @@ -2480,18 +2474,18 @@ def _calculate( performance_group_dict = {} for (component, location, direction, uid), num_blocks in blocks.items(): for decision_variable in self.decision_variables: - if (component, decision_variable) in self._missing: + if (component, decision_variable) in self.missing: continue performance_group_dict[ - ((component, decision_variable), location, direction, uid) + (component, decision_variable), location, direction, uid ] = num_blocks if not performance_group_dict: self.log.msg( - "No loss function-driven components---LF sample is set to None.", + 'No loss function-driven components---LF sample is set to None.', prepend_timestamp=False, ) - return None + return performance_group = pd.DataFrame( # type: ignore performance_group_dict.values(), @@ -2524,7 +2518,7 @@ def _calculate( ) self.log.msg( - "\nCalculating the median repair consequences...", + '\nCalculating the median repair consequences...', prepend_timestamp=False, ) @@ -2533,29 +2527,29 @@ def _calculate( ) self.log.msg( - "Successfully determined median repair consequences.", + 'Successfully determined median repair consequences.', prepend_timestamp=False, ) self.log.msg( - "\nConsidering deviations from the median values to obtain " - "random DV sample..." + '\nConsidering deviations from the median values to obtain ' + 'random DV sample...' 
) self.log.msg( - "Preparing random variables for repair cost and time...", + 'Preparing random variables for repair cost and time...', prepend_timestamp=False, ) - RV_reg = self._create_DV_RVs(medians.columns) # type: ignore - if RV_reg is not None: + rv_reg = self._create_DV_RVs(medians.columns) # type: ignore + if rv_reg is not None: assert self._asmnt.options.sampling_method is not None - RV_reg.generate_sample( + rv_reg.generate_sample( sample_size=sample_size, method=self._asmnt.options.sampling_method ) std_sample = base.convert_to_MultiIndex( - pd.DataFrame(RV_reg.RV_sample), axis=1 + pd.DataFrame(rv_reg.RV_sample), axis=1 ) std_sample.columns.names = [ 'dv', @@ -2566,15 +2560,15 @@ def _calculate( 'uid', 'block', ] - std_sample.sort_index(axis=1, inplace=True) + std_sample = std_sample.sort_index(axis=1) sample = (medians * std_sample).combine_first(medians) else: sample = medians self.log.msg( - f"\nSuccessfully generated {sample_size} realizations of " - "deviation from the median consequences.", + f'\nSuccessfully generated {sample_size} realizations of ' + 'deviation from the median consequences.', prepend_timestamp=False, ) @@ -2583,20 +2577,17 @@ def _calculate( by=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'], axis=1 ).sum() - self.log.msg("Successfully obtained DV sample.", prepend_timestamp=False) + self.log.msg('Successfully obtained DV sample.', prepend_timestamp=False) self.sample = sample - return None + return - def _convert_loss_parameter_units(self) -> None: - """ - Converts previously loaded loss parameters to base units. 
- - """ + def convert_loss_parameter_units(self) -> None: + """Convert previously loaded loss parameters to base units.""" if self.loss_params is None: - return None - units = self.loss_params[('DV', 'Unit')] - arg_units = self.loss_params[('Demand', 'Unit')] + return + units = self.loss_params['DV', 'Unit'] + arg_units = self.loss_params['Demand', 'Unit'] column = 'LossFunction' params = self.loss_params[column].copy() assert isinstance(params, pd.DataFrame) @@ -2605,8 +2596,8 @@ def _convert_loss_parameter_units(self) -> None: units, arg_units, divide_units=False, - ).values - return None + ).to_numpy() + return def _calc_median_consequence( self, @@ -2617,6 +2608,8 @@ def _calc_median_consequence( cmp_sample: dict, ) -> pd.DataFrame: """ + Calculate median repair consequences. + Calculates the median repair consequences for each loss function-driven component based on its quantity realizations and the associated loss parameters. @@ -2650,8 +2643,12 @@ def _calc_median_consequence( Dataframe with medial loss for loss-function driven components. - """ + Raises + ------ + ValueError + If loss function interpolation fails. + """ medians_dict = {} # for each component in the asset model @@ -2669,51 +2666,51 @@ def _calc_median_consequence( ), blocks in performance_group['Blocks'].items(): consequence = loss_map[component] edp = required_edps[ - ((consequence, decision_variable), location, direction, uid) + (consequence, decision_variable), location, direction, uid ] edp_values = demand_dict[edp] assert self.loss_params is not None - loss_function_str = self.loss_params.at[ + loss_function_str = self.loss_params.loc[ (component, decision_variable), ('LossFunction', 'Theta_0') ] + assert isinstance(loss_function_str, str) try: median_loss = base.stringterpolation(loss_function_str)(edp_values) except ValueError as exc: - raise ValueError( + msg = ( f'Loss function interpolation for consequence ' f'`{consequence}-{decision_variable}` has failed. 
' f'Ensure a sufficient interpolation domain ' f'for the X values (those after the `|` symbol) ' f'and verify the X-value and Y-value lengths match.' - ) from exc + ) + raise ValueError(msg) from exc for block in range(blocks): medians_dict[ - ( - decision_variable, - consequence, - component, - location, - direction, - uid, - str(block), - ) + decision_variable, + consequence, + component, + location, + direction, + uid, + str(block), ] = ( median_loss - * cmp_sample[component, location, direction, uid].values + * cmp_sample[component, location, direction, uid].to_numpy() / float(blocks) ) medians = pd.DataFrame(medians_dict) medians.columns.names = ['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block'] - medians.sort_index(axis=1, inplace=True) + return medians.sort_index(axis=1) - return medians - - def _create_DV_RVs( + def _create_DV_RVs( # noqa: N802 self, cases: pd.MultiIndex ) -> uq.RandomVariableRegistry | None: """ - Prepare the random variables associated with decision + Prepare the decision variable random variables. + + Prepares the random variables associated with decision variables, such as repair cost and time. Parameters @@ -2732,8 +2729,7 @@ def _create_DV_RVs( or conditions), returns None. """ - - RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) + rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng) rv_count = 0 @@ -2747,7 +2743,6 @@ def _create_DV_RVs( uid, block, ) in cases: - # load the corresponding parameters assert self.loss_params is not None parameters = self.loss_params.loc[(consequence, decision_variable), :] @@ -2755,7 +2750,7 @@ def _create_DV_RVs( if ('LossFunction', 'Family') not in parameters: # Everything is deterministic, no need to create RVs. 
continue - family = parameters.at[('LossFunction', 'Family')] + family = parameters.loc['LossFunction', 'Family'] theta = [ parameters.get(('LossFunction', f'Theta_{t_i}'), np.nan) for t_i in range(3) @@ -2771,7 +2766,7 @@ def _create_DV_RVs( theta[0] = 1.0 # assign RVs - RV_reg.add_RV( + rv_reg.add_RV( uq.rv_class_map(family)( # type: ignore name=( f'{decision_variable}-{consequence}-' @@ -2786,7 +2781,7 @@ def _create_DV_RVs( # assign Time-Cost correlation whenever applicable rho = self._asmnt.options.rho_cost_time if rho != 0.0: - for rv_tag in RV_reg.RV: + for rv_tag in rv_reg.RV: if not rv_tag.startswith('Cost'): continue split = rv_tag.split('-') @@ -2797,30 +2792,30 @@ def _create_DV_RVs( uid = split[5] block = split[6] time_rv_tag = rv_tag.replace('Cost', 'Time') - if time_rv_tag in RV_reg.RV: - RV_reg.add_RV_set( + if time_rv_tag in rv_reg.RV: + rv_reg.add_RV_set( uq.RandomVariableSet( ( f'DV-{consequence}-{component}-' f'{location}-{direction}-{uid}-{block}_set' ), - list(RV_reg.RVs([rv_tag, time_rv_tag]).values()), + list(rv_reg.RVs([rv_tag, time_rv_tag]).values()), np.array([[1.0, rho], [rho, 1.0]]), ) ) self.log.msg( - f"\n{rv_count} random variables created.", prepend_timestamp=False + f'\n{rv_count} random variables created.', prepend_timestamp=False ) if rv_count > 0: - return RV_reg + return rv_reg return None -def _prep_constant_median_DV(median: float) -> Callable: +def _prep_constant_median_DV(median: float) -> Callable: # noqa: N802 """ - Returns a constant median Decision Variable (DV) function. + Return a constant median Decision Variable (DV) function. 
Parameters ---------- @@ -2835,20 +2830,17 @@ def _prep_constant_median_DV(median: float) -> Callable: """ - def f(*args): - # pylint: disable=unused-argument - # pylint: disable=missing-return-doc - # pylint: disable=missing-return-type-doc + def f(*args): # noqa: ANN002, ANN202, ARG001 return median return f -def _prep_bounded_multilinear_median_DV( +def _prep_bounded_multilinear_median_DV( # noqa: N802 medians: np.ndarray, quantities: np.ndarray ) -> Callable: """ - Returns a bounded multilinear median Decision Variable (DV) function. + Return a bounded multilinear median Decision Variable (DV) function. The median DV equals the min and max values when the quantity is outside of the prescribed quantity bounds. When the quantity is within the @@ -2869,39 +2861,57 @@ def _prep_bounded_multilinear_median_DV( callable A function that returns the median DV given the quantity of damaged components. + """ - def f(quantity): - # pylint: disable=missing-return-doc - # pylint: disable=missing-return-type-doc + def f(quantity): # noqa: ANN001, ANN202 if quantity is None: - raise ValueError( + msg = ( 'A bounded linear median Decision Variable function called ' 'without specifying the quantity of damaged components' ) + raise ValueError(msg) q_array = np.asarray(quantity, dtype=np.float64) # calculate the median consequence given the quantity of damaged # components - output = np.interp(q_array, quantities, medians) - - return output + return np.interp(q_array, quantities, medians) return f def _is_for_lf_model(data: pd.DataFrame) -> bool: """ - Determines if the specified loss model parameters are for - components modeled with Loss Functions (LF). + Determine if the data are for the lf_model. + + Parameters + ---------- + data: pd.DataFrame + Data to be checked. + + Returns + ------- + bool + Whether the data are for the lf_model. 
+ """ return 'LossFunction' in data.columns.get_level_values(0) def _is_for_ds_model(data: pd.DataFrame) -> bool: """ - Determines if the specified loss model parameters are for - components modeled with discrete Damage States (DS). + Determine if the data are for the ds_model. + + Parameters + ---------- + data: pd.DataFrame + Data to be checked. + + Returns + ------- + bool + Whether the data are for the ds_model. + """ return 'DS1' in data.columns.get_level_values(0) diff --git a/pelicun/model/pelicun_model.py b/pelicun/model/pelicun_model.py index 1d743a206..e886ad06a 100644 --- a/pelicun/model/pelicun_model.py +++ b/pelicun/model/pelicun_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,18 +37,17 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines the PelicunModel object and its methods. -""" +"""PelicunModel object and associated methods.""" from __future__ import annotations -from typing import TYPE_CHECKING -from typing import Any + +from typing import TYPE_CHECKING, Any + import numpy as np import pandas as pd -from pelicun import base -from pelicun import uq + +from pelicun import base, uq if TYPE_CHECKING: from pelicun.assessment import AssessmentBase @@ -58,14 +56,20 @@ class PelicunModel: - """ - Generic model class to manage methods shared between all models in Pelicun. - - """ + """Generic model class to manage methods shared between all models in Pelicun.""" __slots__ = ['_asmnt', 'log'] - def __init__(self, assessment: AssessmentBase): + def __init__(self, assessment: AssessmentBase) -> None: + """ + Instantiate PelicunModel objects. + + Parameters + ---------- + assessment: Assessment + Parent assessment object. 
+ + """ # link the PelicunModel object to its AssessmentBase object self._asmnt: AssessmentBase = assessment @@ -73,16 +77,17 @@ def __init__(self, assessment: AssessmentBase): # concise syntax self.log = self._asmnt.log - def _convert_marginal_params( + def _convert_marginal_params( # noqa: C901 self, marginal_params: pd.DataFrame, units: pd.Series, arg_units: pd.Series | None = None, + *, divide_units: bool = True, inverse_conversion: bool = False, ) -> pd.DataFrame: """ - Converts the parameters of marginal distributions in a model to SI units. + Convert the parameters of marginal distributions in a model to SI units. Parameters ---------- @@ -143,10 +148,10 @@ def _convert_marginal_params( marginal_params[col_name] = np.nan # get a list of unique units - unique_units = units.unique() + unique_units = units.dropna().unique() # for each unit - for unit_name in unique_units: + for unit_name in unique_units: # noqa: PLR1702 # get the scale factor for converting from the source unit unit_factor = self._asmnt.calc_unit_scale_factor(unit_name) @@ -156,7 +161,7 @@ def _convert_marginal_params( # for each variable for row_id in unit_ids: # pull the parameters of the marginal distribution - family = marginal_params.at[row_id, 'Family'] + family = marginal_params.loc[row_id, 'Family'] if family == 'empirical': continue @@ -164,7 +169,7 @@ def _convert_marginal_params( # load the theta values theta = marginal_params.loc[ row_id, ['Theta_0', 'Theta_1', 'Theta_2'] - ].values + ].to_numpy() # for each theta args: list[Any] = [] @@ -225,7 +230,10 @@ def _convert_marginal_params( if inverse_conversion: conversion_factor = 1.00 / conversion_factor theta, tr_limits = uq.scale_distribution( # type: ignore - conversion_factor, family, theta, tr_limits # type: ignore + conversion_factor, + family, + theta, + tr_limits, # type: ignore ) # convert multilinear function parameters back into strings @@ -234,7 +242,7 @@ def _convert_marginal_params( theta[a_i] = '|'.join( [ 
','.join([f'{val:g}' for val in vals]) - for vals in (theta[a_i], args[a_i]) + for vals in (theta[a_i], arg) ] ) @@ -248,14 +256,14 @@ def _convert_marginal_params( ) # remove the added columns - marginal_params = marginal_params[original_cols] - - return marginal_params + return marginal_params[original_cols] def _get_locations(self, loc_str: str) -> np.ndarray: """ - Parses a location string to determine specific sections of - an asset to be processed. + Parse a location string. + + Parses a location string to determine specific sections of an + asset to be processed. This function interprets various string formats to output a list of strings representing sections or parts of the @@ -265,7 +273,7 @@ def _get_locations(self, loc_str: str) -> np.ndarray: Parameters ---------- - loc_str : str + loc_str: str A string that describes the location or range of sections in the asset. It can be a single number, a range, a comma-separated list, 'all', 'top', or @@ -307,6 +315,7 @@ def _get_locations(self, loc_str: str) -> np.ndarray: >>> _get_locations('roof') array(['11']) + """ try: res = str(int(float(loc_str))) @@ -315,33 +324,36 @@ def _get_locations(self, loc_str: str) -> np.ndarray: except ValueError as exc: stories = self._asmnt.stories - if "--" in loc_str: + if '--' in loc_str: s_low, s_high = loc_str.split('--') s_low = self._get_locations(s_low)[0] s_high = self._get_locations(s_high)[0] return np.arange(int(s_low), int(s_high) + 1).astype(str) - if "," in loc_str: + if ',' in loc_str: return np.array(loc_str.split(','), dtype=int).astype(str) - if loc_str == "all": + if loc_str == 'all': assert stories is not None return np.arange(1, stories + 1).astype(str) - if loc_str == "top": + if loc_str == 'top': assert stories is not None return np.array([stories]).astype(str) - if loc_str == "roof": + if loc_str == 'roof': assert stories is not None return np.array([stories + 1]).astype(str) - raise ValueError(f"Cannot parse location string: " f"{loc_str}") from exc + 
msg = f'Cannot parse location string: ' f'{loc_str}' + raise ValueError(msg) from exc def _get_directions(self, dir_str: str | None) -> np.ndarray: """ - Parses a direction string to determine specific - orientations or directions applicable within an asset. + Parse a direction string. + + Parses a direction string to determine specific orientations + or directions applicable within an asset. This function processes direction descriptions to output an array of strings, each representing a specific @@ -351,7 +363,7 @@ def _get_directions(self, dir_str: str | None) -> np.ndarray: Parameters ---------- - dir_str : str or None + dir_str: str or None A string that describes the direction or range of directions in the asset. It can be a single number, a range, a comma-separated list, or it can be null, @@ -388,8 +400,9 @@ def _get_directions(self, dir_str: str | None) -> np.ndarray: >>> get_directions('1,2,5') array(['1', '2', '5']) + """ - if pd.isnull(dir_str): + if pd.isna(dir_str): return np.ones(1).astype(str) try: @@ -397,24 +410,21 @@ def _get_directions(self, dir_str: str | None) -> np.ndarray: return np.array([res]) except ValueError as exc: - if "," in dir_str: # type: ignore + if ',' in dir_str: # type: ignore return np.array( dir_str.split(','), # type: ignore dtype=int, - ).astype( - str - ) # type: ignore + ).astype(str) # type: ignore - if "--" in dir_str: # type: ignore + if '--' in dir_str: # type: ignore d_low, d_high = dir_str.split('--') # type: ignore d_low = self._get_directions(d_low)[0] d_high = self._get_directions(d_high)[0] return np.arange(int(d_low), int(d_high) + 1).astype(str) # else: - raise ValueError( - f"Cannot parse direction string: " f"{dir_str}" - ) from exc + msg = f'Cannot parse direction string: ' f'{dir_str}' + raise ValueError(msg) from exc def query_error_setup(self, path: str) -> str | bool: """ diff --git a/pelicun/warnings.py b/pelicun/pelicun_warnings.py similarity index 87% rename from pelicun/warnings.py rename to 
pelicun/pelicun_warnings.py index 4d81cc97e..399f32ab7 100644 --- a/pelicun/warnings.py +++ b/pelicun/pelicun_warnings.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,16 +32,8 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -# -# Contributors: -# Adam Zsarnóczay -# John Vouvakis Manousakis - -""" -This module defines pelicun warning classes and relevant methods. -It also defines pelicun errors. -""" +"""Pelicun warning and error classes.""" from __future__ import annotations @@ -57,11 +48,15 @@ class PelicunInvalidConfigError(Exception): Attributes ---------- - message : str + message: str Explanation of the error. + """ - def __init__(self, message="Invalid options in configuration file."): + def __init__( + self, message: str = 'Invalid options in configuration file.' + ) -> None: + """Instantiate the error.""" self.message = message super().__init__(self.message) @@ -76,7 +71,11 @@ class InconsistentUnitsError(Exception): Explanation of the error. 
""" - def __init__(self, message="Inconsistent units.", file=None): + def __init__( + self, message: str = 'Inconsistent units.', file: str | None = None + ) -> None: + self.message: str + if file: self.message = f'{self.message}\n' f'File: {file}' else: diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json b/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json index e19590231..f09ce6df4 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json @@ -71,7 +71,7 @@ "D.50.92 - Other Electrical Systems" ] }, - "E - Equipments and furnishings": { + "E - Equipment and furnishings": { "E.20 - Furnishings": [ "E.20.22 - Movable Furnishings" ] @@ -8812,7 +8812,7 @@ "LS2": { "DS2": { "Description": "Structural damage but live load capacity remains intact. Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." } }, "LS3": { @@ -8838,7 +8838,7 @@ "LS2": { "DS2": { "Description": "Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." } }, "LS3": { @@ -10147,7 +10147,7 @@ }, "D.20.21.011a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10167,7 +10167,7 @@ }, "D.20.21.011b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10187,7 +10187,7 @@ }, "D.20.21.012a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10207,7 +10207,7 @@ }, "D.20.21.012b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10227,7 +10227,7 @@ }, "D.20.21.013a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10247,7 +10247,7 @@ }, "D.20.21.013b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10261,7 +10261,7 @@ }, "D.20.21.014a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10281,7 +10281,7 @@ }, "D.20.21.014b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10295,7 +10295,7 @@ }, "D.20.21.021a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10315,7 +10315,7 @@ }, "D.20.21.022a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10335,7 +10335,7 @@ }, "D.20.21.023a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10355,7 +10355,7 @@ }, "D.20.21.023b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10375,7 +10375,7 @@ }, "D.20.21.024a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10395,7 +10395,7 @@ }, "D.20.21.024b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10415,7 +10415,7 @@ }, "D.20.22.011a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10435,7 +10435,7 @@ }, "D.20.22.011b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10455,7 +10455,7 @@ }, "D.20.22.012a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10475,7 +10475,7 @@ }, "D.20.22.012b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10495,7 +10495,7 @@ }, "D.20.22.013a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10515,7 +10515,7 @@ }, "D.20.22.013b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10529,7 +10529,7 @@ }, "D.20.22.014a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10549,7 +10549,7 @@ }, "D.20.22.014b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10563,7 +10563,7 @@ }, "D.20.22.021a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10583,7 +10583,7 @@ }, "D.20.22.022a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10603,7 +10603,7 @@ }, "D.20.22.023a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10623,7 +10623,7 @@ }, "D.20.22.023b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10643,7 +10643,7 @@ }, "D.20.22.024a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10663,7 +10663,7 @@ }, "D.20.22.024b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10683,7 +10683,7 @@ }, "D.20.31.011b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10703,7 +10703,7 @@ }, "D.20.31.012b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10723,7 +10723,7 @@ }, "D.20.31.013b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10737,7 +10737,7 @@ }, "D.20.31.014b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10751,7 +10751,7 @@ }, "D.20.31.021a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10765,7 +10765,7 @@ }, "D.20.31.021b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10785,7 +10785,7 @@ }, "D.20.31.022a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10799,7 +10799,7 @@ }, "D.20.31.022b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10819,7 +10819,7 @@ }, "D.20.31.023a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10833,7 +10833,7 @@ }, "D.20.31.023b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10847,7 +10847,7 @@ }, "D.20.31.024a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10861,7 +10861,7 @@ }, "D.20.31.024b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10875,7 +10875,7 @@ }, "D.20.51.011a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10895,7 +10895,7 @@ }, "D.20.51.011b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10915,7 +10915,7 @@ }, "D.20.51.012a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10935,7 +10935,7 @@ }, "D.20.51.012b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10955,7 +10955,7 @@ }, "D.20.51.013a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10975,7 +10975,7 @@ }, "D.20.51.013b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10989,7 +10989,7 @@ }, "D.20.51.014a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11009,7 +11009,7 @@ }, "D.20.51.014b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11023,7 +11023,7 @@ }, "D.20.51.021a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11043,7 +11043,7 @@ }, "D.20.51.021b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11057,7 +11057,7 @@ }, "D.20.51.022a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11077,7 +11077,7 @@ }, "D.20.51.023a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11097,7 +11097,7 @@ }, "D.20.51.023b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11117,7 +11117,7 @@ }, "D.20.51.024a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11137,7 +11137,7 @@ }, "D.20.51.024b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11157,7 +11157,7 @@ }, "D.20.61.011a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11177,7 +11177,7 @@ }, "D.20.61.011b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11197,7 +11197,7 @@ }, "D.20.61.012a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11217,7 +11217,7 @@ }, "D.20.61.012b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11237,7 +11237,7 @@ }, "D.20.61.013a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11257,7 +11257,7 @@ }, "D.20.61.013b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11271,7 +11271,7 @@ }, "D.20.61.014a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11291,7 +11291,7 @@ }, "D.20.61.014b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11305,7 +11305,7 @@ }, "D.20.61.021a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11325,7 +11325,7 @@ }, "D.20.61.022a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11345,7 +11345,7 @@ }, "D.20.61.023a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11365,7 +11365,7 @@ }, "D.20.61.023b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11385,7 +11385,7 @@ }, "D.20.61.024a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11405,7 +11405,7 @@ }, "D.20.61.024b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -16573,4 +16573,4 @@ } } } -} \ No newline at end of file +} diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json index c92fd79c6..337d89947 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json @@ -3770,7 +3770,7 @@ }, "STR.URM.L.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. 
Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3801,7 +3801,7 @@ }, "STR.URM.L.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. 
Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3832,7 +3832,7 @@ }, "STR.URM.M.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Mid-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3863,7 +3863,7 @@ }, "STR.URM.M.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Mid-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7878,7 +7878,7 @@ }, "LF.URM.L.LC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. 
The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. 
Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7909,7 +7909,7 @@ }, "LF.URM.L.PC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. 
small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. 
\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry construction built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7940,7 +7940,7 @@ }, "LF.URM.M.LC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Mid-Rise, Low-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. 
The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. 
Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry construction built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7971,7 +7971,7 @@ }, "LF.URM.M.PC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Mid-Rise, Pre-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. 
small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. 
\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry construction built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -8188,4 +8188,4 @@ } } } -} \ No newline at end of file +} diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json index bd78db139..83ef5139a 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json @@ -1662,7 +1662,7 @@ }, "STR.URM.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. 
The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry construction built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -1693,7 +1693,7 @@ }, "STR.URM.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry construction built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -2050,4 +2050,4 @@ } } } -} \ No newline at end of file +} diff --git a/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json b/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json index a715e45b2..db558ac72 100644 --- a/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json +++ b/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json @@ -71,7 +71,7 @@ "D.50.92 - Other Electrical Systems" ] }, - "E - Equipments and furnishings": { + "E - Equipment and furnishings": { "E.20 - Furnishings": [ "E.20.22 - Movable Furnishings" ] @@ -7278,7 +7278,7 @@ }, "DS2": { "Description": "Structural damage but live load capacity remains intact. Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." }, "DS3": { "Description": "Loss of live load capacity. Connection and or weld fracture.", @@ -7299,7 +7299,7 @@ }, "DS2": { "Description": "Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." }, "DS3": { "Description": "Loss of live load capacity. 
Connection and or weld fracture.", @@ -8453,7 +8453,7 @@ }, "D.20.21.011a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8470,7 +8470,7 @@ }, "D.20.21.011b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8487,7 +8487,7 @@ }, "D.20.21.012a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. 
Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8504,7 +8504,7 @@ }, "D.20.21.012b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8521,7 +8521,7 @@ }, "D.20.21.013a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8538,7 +8538,7 @@ }, "D.20.21.013b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8551,7 +8551,7 @@ }, "D.20.21.014a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8568,7 +8568,7 @@ }, "D.20.21.014b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8581,7 +8581,7 @@ }, "D.20.21.021a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8598,7 +8598,7 @@ }, "D.20.21.022a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8615,7 +8615,7 @@ }, "D.20.21.023a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8632,7 +8632,7 @@ }, "D.20.21.023b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8649,7 +8649,7 @@ }, "D.20.21.024a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8666,7 +8666,7 @@ }, "D.20.21.024b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8683,7 +8683,7 @@ }, "D.20.22.011a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8700,7 +8700,7 @@ }, "D.20.22.011b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8717,7 +8717,7 @@ }, "D.20.22.012a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8734,7 +8734,7 @@ }, "D.20.22.012b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8751,7 +8751,7 @@ }, "D.20.22.013a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8768,7 +8768,7 @@ }, "D.20.22.013b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8781,7 +8781,7 @@ }, "D.20.22.014a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8798,7 +8798,7 @@ }, "D.20.22.014b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8811,7 +8811,7 @@ }, "D.20.22.021a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8828,7 +8828,7 @@ }, "D.20.22.022a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8845,7 +8845,7 @@ }, "D.20.22.023a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8862,7 +8862,7 @@ }, "D.20.22.023b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8879,7 +8879,7 @@ }, "D.20.22.024a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8896,7 +8896,7 @@ }, "D.20.22.024b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8913,7 +8913,7 @@ }, "D.20.31.011b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8930,7 +8930,7 @@ }, "D.20.31.012b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8947,7 +8947,7 @@ }, "D.20.31.013b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8960,7 +8960,7 @@ }, "D.20.31.014b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8973,7 +8973,7 @@ }, "D.20.31.021a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8986,7 +8986,7 @@ }, "D.20.31.021b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9003,7 +9003,7 @@ }, "D.20.31.022a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9016,7 +9016,7 @@ }, "D.20.31.022b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9033,7 +9033,7 @@ }, "D.20.31.023a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9046,7 +9046,7 @@ }, "D.20.31.023b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9059,7 +9059,7 @@ }, "D.20.31.024a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9072,7 +9072,7 @@ }, "D.20.31.024b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9085,7 +9085,7 @@ }, "D.20.51.011a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9102,7 +9102,7 @@ }, "D.20.51.011b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9119,7 +9119,7 @@ }, "D.20.51.012a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9136,7 +9136,7 @@ }, "D.20.51.012b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9153,7 +9153,7 @@ }, "D.20.51.013a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9170,7 +9170,7 @@ }, "D.20.51.013b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9183,7 +9183,7 @@ }, "D.20.51.014a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9200,7 +9200,7 @@ }, "D.20.51.014b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9213,7 +9213,7 @@ }, "D.20.51.021a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9230,7 +9230,7 @@ }, "D.20.51.021b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9243,7 +9243,7 @@ }, "D.20.51.022a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9260,7 +9260,7 @@ }, "D.20.51.023a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9277,7 +9277,7 @@ }, "D.20.51.023b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9294,7 +9294,7 @@ }, "D.20.51.024a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9311,7 +9311,7 @@ }, "D.20.51.024b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9328,7 +9328,7 @@ }, "D.20.61.011a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9345,7 +9345,7 @@ }, "D.20.61.011b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9362,7 +9362,7 @@ }, "D.20.61.012a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9379,7 +9379,7 @@ }, "D.20.61.012b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9396,7 +9396,7 @@ }, "D.20.61.013a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9413,7 +9413,7 @@ }, "D.20.61.013b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9426,7 +9426,7 @@ }, "D.20.61.014a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9443,7 +9443,7 @@ }, "D.20.61.014b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9456,7 +9456,7 @@ }, "D.20.61.021a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9473,7 +9473,7 @@ }, "D.20.61.022a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9490,7 +9490,7 @@ }, "D.20.61.023a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9507,7 +9507,7 @@ }, "D.20.61.023b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9524,7 +9524,7 @@ }, "D.20.61.024a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9541,7 +9541,7 @@ }, "D.20.61.024b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -14006,4 +14006,4 @@ } } } -} \ No newline at end of file +} diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py index a2ea3f929..c674bb8c4 100644 --- a/pelicun/resources/auto/Hazus_Earthquake_IM.py +++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2023 Leland Stanford Junior University # Copyright (c) 2023 The Regents of the University of California @@ -36,23 +35,28 @@ # # Contributors: # Adam Zsarnóczay +"""Hazus Earthquake IM.""" + from __future__ import annotations + import json + import pandas as pd + import pelicun -ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'} +ap_design_level = {1940: 'LC', 1975: 'MC', 2100: 'HC'} # original: # ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'} # Note that the duplicated key is ignored, and Python keeps the last # entry. 
-ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} +ap_design_level_w1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} # original: # ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'} # same thing applies -ap_Occupancy = { +ap_occupancy = { 'Other/Unknown': 'RES3', 'Residential - Single-Family': 'RES1', 'Residential - Town-Home': 'RES3', @@ -70,7 +74,10 @@ # Convert common length units -def convertUnits(value, unit_in, unit_out): +def convertUnits(value, unit_in, unit_out): # noqa: N802 + """ + Convert units. + """ aval_types = ['m', 'mm', 'cm', 'km', 'inch', 'ft', 'mile'] m = 1.0 mm = 0.001 * m @@ -89,18 +96,13 @@ def convertUnits(value, unit_in, unit_out): 'mile': mile, } if (unit_in not in aval_types) or (unit_out not in aval_types): - print( - f"The unit {unit_in} or {unit_out} " - f"are used in auto_population but not supported" - ) - return - value = value * scale_map[unit_in] / scale_map[unit_out] - return value + return None + return value * scale_map[unit_in] / scale_map[unit_out] -def convertBridgeToHAZUSclass(AIM): +def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 # TODO: replace labels in AIM with standard CamelCase versions - structureType = AIM["BridgeClass"] + structure_type = aim['BridgeClass'] # if ( # type(structureType) == str # and len(structureType) > 3 @@ -109,98 +111,67 @@ def convertBridgeToHAZUSclass(AIM): # and 29 > int(structureType[3:]) # ): # return AIM["bridge_class"] - state = AIM["StateCode"] - yr_built = AIM["YearBuilt"] - num_span = AIM["NumOfSpans"] - len_max_span = AIM["MaxSpanLength"] - len_unit = AIM["units"]["length"] - len_max_span = convertUnits(len_max_span, len_unit, "m") + state = aim['StateCode'] + yr_built = aim['YearBuilt'] + num_span = aim['NumOfSpans'] + len_max_span = aim['MaxSpanLength'] + len_unit = aim['units']['length'] + len_max_span = convertUnits(len_max_span, len_unit, 'm') seismic = (int(state) == 6 and int(yr_built) >= 1975) or ( int(state) != 6 and int(yr_built) >= 1990 ) # Use a catch-all, 
other class by default - bridge_class = "HWB28" + bridge_class = 'HWB28' if len_max_span > 150: - if not seismic: - bridge_class = "HWB1" - else: - bridge_class = "HWB2" + bridge_class = 'HWB1' if not seismic else 'HWB2' elif num_span == 1: - if not seismic: - bridge_class = "HWB3" - else: - bridge_class = "HWB4" + bridge_class = 'HWB3' if not seismic else 'HWB4' - elif structureType in list(range(101, 107)): - if not seismic: - if state != 6: - bridge_class = "HWB5" - else: - bridge_class = "HWB6" - else: - bridge_class = "HWB7" + elif structure_type in list(range(101, 107)): + bridge_class = ('HWB5' if state != 6 else 'HWB6') if not seismic else 'HWB7' - elif structureType in [205, 206]: - if not seismic: - bridge_class = "HWB8" - else: - bridge_class = "HWB9" + elif structure_type in [205, 206]: + bridge_class = 'HWB8' if not seismic else 'HWB9' - elif structureType in list(range(201, 207)): - if not seismic: - bridge_class = "HWB10" - else: - bridge_class = "HWB11" + elif structure_type in list(range(201, 207)): + bridge_class = 'HWB10' if not seismic else 'HWB11' - elif structureType in list(range(301, 307)): + elif structure_type in list(range(301, 307)): if not seismic: if len_max_span >= 20: - if state != 6: - bridge_class = "HWB12" - else: - bridge_class = "HWB13" + bridge_class = 'HWB12' if state != 6 else 'HWB13' + elif state != 6: + bridge_class = 'HWB24' else: - if state != 6: - bridge_class = "HWB24" - else: - bridge_class = "HWB25" + bridge_class = 'HWB25' else: - bridge_class = "HWB14" + bridge_class = 'HWB14' - elif structureType in list(range(402, 411)): + elif structure_type in list(range(402, 411)): if not seismic: if len_max_span >= 20: - bridge_class = "HWB15" + bridge_class = 'HWB15' elif state != 6: - bridge_class = "HWB26" + bridge_class = 'HWB26' else: - bridge_class = "HWB27" + bridge_class = 'HWB27' else: - bridge_class = "HWB16" + bridge_class = 'HWB16' - elif structureType in list(range(501, 507)): + elif structure_type in 
list(range(501, 507)): if not seismic: - if state != 6: - bridge_class = "HWB17" - else: - bridge_class = "HWB18" + bridge_class = 'HWB17' if state != 6 else 'HWB18' else: - bridge_class = "HWB19" + bridge_class = 'HWB19' - elif structureType in [605, 606]: - if not seismic: - bridge_class = "HWB20" - else: - bridge_class = "HWB21" + elif structure_type in [605, 606]: + bridge_class = 'HWB20' if not seismic else 'HWB21' - elif structureType in list(range(601, 608)): - if not seismic: - bridge_class = "HWB22" - else: - bridge_class = "HWB23" + elif structure_type in list(range(601, 608)): + bridge_class = 'HWB22' if not seismic else 'HWB23' # TODO: review and add HWB24-27 rules # TODO: also double check rules for HWB10-11 and HWB22-23 @@ -208,30 +179,30 @@ def convertBridgeToHAZUSclass(AIM): return bridge_class -def convertTunnelToHAZUSclass(AIM): - if ("Bored" in AIM["ConstructType"]) or ("Drilled" in AIM["ConstructType"]): - return "HTU1" - elif ("Cut" in AIM["ConstructType"]) or ("Cover" in AIM["ConstructType"]): - return "HTU2" +def convertTunnelToHAZUSclass(aim) -> str: # noqa: N802 + if ('Bored' in aim['ConstructType']) or ('Drilled' in aim['ConstructType']): + return 'HTU1' + elif ('Cut' in aim['ConstructType']) or ('Cover' in aim['ConstructType']): + return 'HTU2' else: # Select HTU2 for unclassified tunnels because it is more conservative. 
- return "HTU2" + return 'HTU2' -def convertRoadToHAZUSclass(AIM): - if AIM["RoadType"] in ["Primary", "Secondary"]: - return "HRD1" +def convertRoadToHAZUSclass(aim) -> str: # noqa: N802 + if aim['RoadType'] in ['Primary', 'Secondary']: + return 'HRD1' - elif AIM["RoadType"] == "Residential": - return "HRD2" + elif aim['RoadType'] == 'Residential': + return 'HRD2' else: # many unclassified roads are urban roads - return "HRD2" + return 'HRD2' -def convert_story_rise(structureType, stories): - if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']: +def convert_story_rise(structure_type, stories): + if structure_type in ['W1', 'W2', 'S3', 'PC1', 'MH']: # These archetypes have no rise information in their IDs rise = None @@ -241,26 +212,19 @@ def convert_story_rise(structureType, stories): stories = int(stories) except (ValueError, TypeError): - raise ValueError( + msg = ( 'Missing "NumberOfStories" information, ' 'cannot infer `rise` attribute of archetype' ) + raise ValueError(msg) - if structureType == 'RM1': - if stories <= 3: - rise = "L" - - else: - rise = "M" - - elif structureType == 'URM': - if stories <= 2: - rise = "L" + if structure_type == 'RM1': + rise = 'L' if stories <= 3 else 'M' - else: - rise = "M" + elif structure_type == 'URM': + rise = 'L' if stories <= 2 else 'M' - elif structureType in [ + elif structure_type in [ 'S1', 'S2', 'S4', @@ -272,18 +236,18 @@ def convert_story_rise(structureType, stories): 'RM2', ]: if stories <= 3: - rise = "L" + rise = 'L' elif stories <= 7: - rise = "M" + rise = 'M' else: - rise = "H" + rise = 'H' return rise -def auto_populate(AIM): +def auto_populate(aim): # noqa: C901 """ Automatically creates a performance model for PGA-based Hazus EQ analysis. @@ -307,255 +271,246 @@ def auto_populate(AIM): CMP: DataFrame Component assignment - Defines the components (in rows) and their location, direction, and quantity (in columns). 
- """ + """ # extract the General Information - GI = AIM.get('GeneralInformation', None) + gi = aim.get('GeneralInformation', None) - if GI is None: + if gi is None: # TODO: show an error message pass # initialize the auto-populated GI - GI_ap = GI.copy() + gi_ap = gi.copy() - assetType = AIM["assetType"] - ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"] + asset_type = aim['assetType'] + ground_failure = aim['Applications']['DL']['ApplicationData']['ground_failure'] - if assetType == "Buildings": + if asset_type == 'Buildings': # get the building parameters - bt = GI['StructureType'] # building type + bt = gi['StructureType'] # building type # get the design level - dl = GI.get('DesignLevel', None) + dl = gi.get('DesignLevel', None) if dl is None: # If there is no DesignLevel provided, we assume that the YearBuilt is # available - year_built = GI['YearBuilt'] + year_built = gi['YearBuilt'] - if 'W1' in bt: - DesignL = ap_DesignLevel_W1 - else: - DesignL = ap_DesignLevel + design_l = ap_design_level_w1 if 'W1' in bt else ap_design_level - for year in sorted(DesignL.keys()): + for year in sorted(design_l.keys()): if year_built <= year: - dl = DesignL[year] + dl = design_l[year] break - GI_ap['DesignLevel'] = dl + gi_ap['DesignLevel'] = dl # get the number of stories / height - stories = GI.get('NumberOfStories', None) + stories = gi.get('NumberOfStories', None) # We assume that the structure type does not include height information # and we append it here based on the number of story information rise = convert_story_rise(bt, stories) if rise is not None: - LF = f'LF.{bt}.{rise}.{dl}' - GI_ap['BuildingRise'] = rise + lf = f'LF.{bt}.{rise}.{dl}' + gi_ap['BuildingRise'] = rise else: - LF = f'LF.{bt}.{dl}' + lf = f'LF.{bt}.{dl}' - # fmt: off - CMP = pd.DataFrame( # noqa - {f'{LF}': ['ea', 1, 1, 1, 'N/A']}, # noqa - index = ['Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on + comp = pd.DataFrame( + {f'{lf}': 
['ea', 1, 1, 1, 'N/A']}, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T # if needed, add components to simulate damage from ground failure if ground_failure: foundation_type = 'S' - FG_GF_H = f'GF.H.{foundation_type}' - FG_GF_V = f'GF.V.{foundation_type}' + fg_gf_h = f'GF.H.{foundation_type}' + fg_gf_v = f'GF.V.{foundation_type}' - # fmt: off - CMP_GF = pd.DataFrame( # noqa - {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'], # noqa - f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on + CMP_GF = pd.DataFrame( + { + f'{fg_gf_h}': ['ea', 1, 1, 1, 'N/A'], + f'{fg_gf_v}': ['ea', 1, 3, 1, 'N/A'], + }, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T - CMP = pd.concat([CMP, CMP_GF], axis=0) + comp = pd.concat([comp, CMP_GF], axis=0) # set the number of stories to 1 # there is only one component in a building-level resolution stories = 1 # get the occupancy class - if GI['OccupancyClass'] in ap_Occupancy.keys(): - ot = ap_Occupancy[GI['OccupancyClass']] + if gi['OccupancyClass'] in ap_occupancy: + occupancy_type = ap_occupancy[gi['OccupancyClass']] else: - ot = GI['OccupancyClass'] - - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Buildings", - "NumberOfStories": f"{stories}", - "OccupancyType": f"{ot}", - "PlanArea": "1", + occupancy_type = gi['OccupancyClass'] + + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Buildings', + 'NumberOfStories': f'{stories}', + 'OccupancyType': f'{occupancy_type}', + 'PlanArea': '1', }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, - "Losses": { - "Repair": { - "ConsequenceDatabase": "Hazus Earthquake - Buildings", - "MapApproach": "Automatic", + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, + 'Losses': { + 'Repair': { + 'ConsequenceDatabase': 'Hazus 
Earthquake - Buildings', + 'MapApproach': 'Automatic', } }, - "Options": { - "NonDirectionalMultipliers": {"ALL": 1.0}, + 'Options': { + 'NonDirectionalMultipliers': {'ALL': 1.0}, }, } - elif assetType == "TransportationNetwork": - inf_type = GI["assetSubtype"] + elif asset_type == 'TransportationNetwork': + inf_type = gi['assetSubtype'] - if inf_type == "HwyBridge": + if inf_type == 'HwyBridge': # get the bridge class - bt = convertBridgeToHAZUSclass(GI) - GI_ap['BridgeHazusClass'] = bt - - # fmt: off - CMP = pd.DataFrame( # noqa - {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa - f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on - - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Transportation", - "BridgeHazusClass": bt, - "PlanArea": "1", + bt = convertBridgeToHAZUSclass(gi) + gi_ap['BridgeHazusClass'] = bt + + comp = pd.DataFrame( + { + f'HWB.GS.{bt[3:]}': ['ea', 1, 1, 1, 'N/A'], + 'HWB.GF': ['ea', 1, 1, 1, 'N/A'], + }, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T + + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Transportation', + 'BridgeHazusClass': bt, + 'PlanArea': '1', }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, - "Losses": { - "Repair": { - "ConsequenceDatabase": "Hazus Earthquake - Transportation", - "MapApproach": "Automatic", + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, + 'Losses': { + 'Repair': { + 'ConsequenceDatabase': 'Hazus Earthquake - Transportation', + 'MapApproach': 'Automatic', } }, - "Options": { - "NonDirectionalMultipliers": {"ALL": 1.0}, + 'Options': { + 'NonDirectionalMultipliers': {'ALL': 1.0}, }, } - elif inf_type == "HwyTunnel": + elif inf_type == 'HwyTunnel': # get the tunnel class - tt = convertTunnelToHAZUSclass(GI) - 
GI_ap['TunnelHazusClass'] = tt - - # fmt: off - CMP = pd.DataFrame( # noqa - {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa - f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on - - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Transportation", - "TunnelHazusClass": tt, - "PlanArea": "1", + tt = convertTunnelToHAZUSclass(gi) + gi_ap['TunnelHazusClass'] = tt + + comp = pd.DataFrame( + { + f'HTU.GS.{tt[3:]}': ['ea', 1, 1, 1, 'N/A'], + 'HTU.GF': ['ea', 1, 1, 1, 'N/A'], + }, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T + + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Transportation', + 'TunnelHazusClass': tt, + 'PlanArea': '1', }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, - "Losses": { - "Repair": { - "ConsequenceDatabase": "Hazus Earthquake - Transportation", - "MapApproach": "Automatic", + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, + 'Losses': { + 'Repair': { + 'ConsequenceDatabase': 'Hazus Earthquake - Transportation', + 'MapApproach': 'Automatic', } }, - "Options": { - "NonDirectionalMultipliers": {"ALL": 1.0}, + 'Options': { + 'NonDirectionalMultipliers': {'ALL': 1.0}, }, } - elif inf_type == "Roadway": + elif inf_type == 'Roadway': # get the road class - rt = convertRoadToHAZUSclass(GI) - GI_ap['RoadHazusClass'] = rt - - # fmt: off - CMP = pd.DataFrame( # noqa - {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on - - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Transportation", - "RoadHazusClass": rt, - "PlanArea": "1", + rt = convertRoadToHAZUSclass(gi) + gi_ap['RoadHazusClass'] = rt + + comp = pd.DataFrame( + 
{f'HRD.GF.{rt[3:]}': ['ea', 1, 1, 1, 'N/A']}, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T + + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Transportation', + 'RoadHazusClass': rt, + 'PlanArea': '1', }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, - "Losses": { - "Repair": { - "ConsequenceDatabase": "Hazus Earthquake - Transportation", - "MapApproach": "Automatic", + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, + 'Losses': { + 'Repair': { + 'ConsequenceDatabase': 'Hazus Earthquake - Transportation', + 'MapApproach': 'Automatic', } }, - "Options": { - "NonDirectionalMultipliers": {"ALL": 1.0}, + 'Options': { + 'NonDirectionalMultipliers': {'ALL': 1.0}, }, } else: - print("subtype not supported in HWY") + print('subtype not supported in HWY') - elif assetType == "WaterDistributionNetwork": + elif asset_type == 'WaterDistributionNetwork': pipe_material_map = { - "CI": "B", - "AC": "B", - "RCC": "B", - "DI": "D", - "PVC": "D", - "DS": "B", - "BS": "D", + 'CI': 'B', + 'AC': 'B', + 'RCC': 'B', + 'DI': 'D', + 'PVC': 'D', + 'DS': 'B', + 'BS': 'D', } # GI = AIM.get("GeneralInformation", None) # if GI==None: # initialize the auto-populated GI - wdn_element_type = GI_ap.get("type", "MISSING") - asset_name = GI_ap.get("AIM_id", None) + wdn_element_type = gi_ap.get('type', 'MISSING') + asset_name = gi_ap.get('AIM_id', None) - if wdn_element_type == "Pipe": - pipe_construction_year = GI_ap.get("year", None) - pipe_diameter = GI_ap.get("Diam", None) + if wdn_element_type == 'Pipe': + pipe_construction_year = gi_ap.get('year', None) + pipe_diameter = gi_ap.get('Diam', None) # diamaeter value is a fundamental part of hydraulic # performance assessment if pipe_diameter is None: - raise ValueError( - f"pipe diamater in asset type {assetType}, \ - asset id \"{asset_name}\" has no diameter \ - value." 
- ) + msg = f'pipe diameter in asset type {asset_type}, \ + asset id "{asset_name}" has no diameter \ + value.' + raise ValueError(msg) - pipe_length = GI_ap.get("Len", None) + pipe_length = gi_ap.get('Len', None) # length value is a fundamental part of hydraulic performance assessment if pipe_diameter is None: - raise ValueError( - f"pipe length in asset type {assetType}, \ - asset id \"{asset_name}\" has no diameter \ - value." - ) + msg = f'pipe length in asset type {asset_type}, \ + asset id "{asset_name}" has no diameter \ + value.' + raise ValueError(msg) - pipe_material = GI_ap.get("material", None) + pipe_material = gi_ap.get('material', None) # pipe material can be not available or named "missing" in # both case, pipe flexibility will be set to "missing" @@ -575,50 +530,34 @@ def auto_populate(AIM): """ if pipe_material is None: if pipe_diameter > 20 * 0.0254: # 20 inches in meter - print( - f"Asset {asset_name} is missing material. Material is\ - assumed to be Cast Iron" - ) - pipe_material = "CI" + pipe_material = 'CI' else: - print( - f"Asset {asset_name} is missing material. 
Material is " - f"assumed to be Steel (ST)" - ) - pipe_material = "ST" + pipe_material = 'ST' - if pipe_material == "ST": + if pipe_material == 'ST': if (pipe_construction_year is not None) and ( pipe_construction_year >= 1935 ): - print( - f"Asset {asset_name} has material of \"ST\" is assumed to be\ - Ductile Steel" - ) - pipe_material = "DS" + pipe_material = 'DS' else: - print( - f'Asset {asset_name} has material of "ST" is assumed to be ' - f'Brittle Steel' - ) - pipe_material = "BS" + pipe_material = 'BS' - pipe_flexibility = pipe_material_map.get(pipe_material, "missing") + pipe_flexibility = pipe_material_map.get(pipe_material, 'missing') - GI_ap["material flexibility"] = pipe_flexibility - GI_ap["material"] = pipe_material + gi_ap['material flexibility'] = pipe_flexibility + gi_ap['material'] = pipe_material # Pipes are broken into 20ft segments (rounding up) and # each segment is represented by an individual entry in - # the performance model, `CMP`. The damage capcity of each + # the performance model, `CMP`. The damage capacity of each # segment is assumed to be independent and driven by the # same EDP. We therefore replicate the EDP associated with - # the pipe to the various locations assgined to the + # the pipe to the various locations assigned to the # segments. # Determine number of segments - pipe_length_unit = GI_ap['units']['length'] + pipe_length_unit = gi_ap['units']['length'] pipe_length_feet = pelicun.base.convert_units( pipe_length, unit=pipe_length_unit, to_unit='ft', category='length' ) @@ -629,20 +568,29 @@ def auto_populate(AIM): else: # In all other cases, round up. 
num_segments = int(pipe_length_feet / reference_length) + 1 - if num_segments > 1: - location_string = f'1--{num_segments}' - else: - location_string = '1' + location_string = f'1--{num_segments}' if num_segments > 1 else '1' # Define performance model - # fmt: off - CMP = pd.DataFrame( # noqa - {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'], # noqa - f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'], # noqa - 'aggregate': ['ea', location_string, '0', 1, 'N/A']}, # noqa - index = ['Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on + comp = pd.DataFrame( + { + f'PWP.{pipe_flexibility}.GS': [ + 'ea', + location_string, + '0', + 1, + 'N/A', + ], + f'PWP.{pipe_flexibility}.GF': [ + 'ea', + location_string, + '0', + 1, + 'N/A', + ], + 'aggregate': ['ea', location_string, '0', 1, 'N/A'], + }, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T # Set up the demand cloning configuration for the pipe # segments, if required. 
@@ -668,112 +616,88 @@ def auto_populate(AIM): # Create damage process dmg_process = { - f"1_PWP.{pipe_flexibility}.GS-LOC": {"DS1": "aggregate_DS1"}, - f"2_PWP.{pipe_flexibility}.GF-LOC": {"DS1": "aggregate_DS1"}, - f"3_PWP.{pipe_flexibility}.GS-LOC": {"DS2": "aggregate_DS2"}, - f"4_PWP.{pipe_flexibility}.GF-LOC": {"DS2": "aggregate_DS2"}, + f'1_PWP.{pipe_flexibility}.GS-LOC': {'DS1': 'aggregate_DS1'}, + f'2_PWP.{pipe_flexibility}.GF-LOC': {'DS1': 'aggregate_DS1'}, + f'3_PWP.{pipe_flexibility}.GS-LOC': {'DS2': 'aggregate_DS2'}, + f'4_PWP.{pipe_flexibility}.GF-LOC': {'DS2': 'aggregate_DS2'}, } dmg_process_filename = 'dmg_process.json' with open(dmg_process_filename, 'w', encoding='utf-8') as f: json.dump(dmg_process, f, indent=2) # Define the auto-populated config - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Water", - "Material Flexibility": pipe_flexibility, - "PlanArea": "1", # Sina: does not make sense for water. + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Water', + 'Material Flexibility': pipe_flexibility, + 'PlanArea': '1', # Sina: does not make sense for water. 
# Kept it here since itw as also # kept here for Transportation }, - "Damage": { - "DamageProcess": "User Defined", - "DamageProcessFilePath": "dmg_process.json", + 'Damage': { + 'DamageProcess': 'User Defined', + 'DamageProcessFilePath': 'dmg_process.json', }, - "Demands": demand_config, + 'Demands': demand_config, } - elif wdn_element_type == "Tank": + elif wdn_element_type == 'Tank': tank_cmp_lines = { - ("OG", "C", 1): {'PST.G.C.A.GS': ['ea', 1, 1, 1, 'N/A']}, - ("OG", "C", 0): {'PST.G.C.U.GS': ['ea', 1, 1, 1, 'N/A']}, - ("OG", "S", 1): {'PST.G.S.A.GS': ['ea', 1, 1, 1, 'N/A']}, - ("OG", "S", 0): {'PST.G.S.U.GS': ['ea', 1, 1, 1, 'N/A']}, + ('OG', 'C', 1): {'PST.G.C.A.GS': ['ea', 1, 1, 1, 'N/A']}, + ('OG', 'C', 0): {'PST.G.C.U.GS': ['ea', 1, 1, 1, 'N/A']}, + ('OG', 'S', 1): {'PST.G.S.A.GS': ['ea', 1, 1, 1, 'N/A']}, + ('OG', 'S', 0): {'PST.G.S.U.GS': ['ea', 1, 1, 1, 'N/A']}, # Anchored status and Wood is not defined for On Ground tanks - ("OG", "W", 0): {'PST.G.W.GS': ['ea', 1, 1, 1, 'N/A']}, + ('OG', 'W', 0): {'PST.G.W.GS': ['ea', 1, 1, 1, 'N/A']}, # Anchored status and Steel is not defined for Above Ground tanks - ("AG", "S", 0): {'PST.A.S.GS': ['ea', 1, 1, 1, 'N/A']}, + ('AG', 'S', 0): {'PST.A.S.GS': ['ea', 1, 1, 1, 'N/A']}, # Anchored status and Concrete is not defined for Buried tanks. 
- ("B", "C", 0): {'PST.B.C.GF': ['ea', 1, 1, 1, 'N/A']}, + ('B', 'C', 0): {'PST.B.C.GF': ['ea', 1, 1, 1, 'N/A']}, } # The default values are assumed: material = Concrete (C), # location= On Ground (OG), and Anchored = 1 - tank_material = GI_ap.get("material", "C") - tank_location = GI_ap.get("location", "OG") - tank_anchored = GI_ap.get("anchored", int(1)) + tank_material = gi_ap.get('material', 'C') + tank_location = gi_ap.get('location', 'OG') + tank_anchored = gi_ap.get('anchored', 1) - tank_material_allowable = {"C", "S"} + tank_material_allowable = {'C', 'S'} if tank_material not in tank_material_allowable: - raise ValueError( - f"Tank's material = \"{tank_material}\" is \ + msg = f'Tank\'s material = "{tank_material}" is \ not allowable in tank {asset_name}. The \ material must be either C for concrete or S \ - for steel." - ) + for steel.' + raise ValueError(msg) - tank_location_allowable = {"AG", "OG", "B"} + tank_location_allowable = {'AG', 'OG', 'B'} if tank_location not in tank_location_allowable: - raise ValueError( - f"Tank's location = \"{tank_location}\" is \ + msg = f'Tank\'s location = "{tank_location}" is \ not allowable in tank {asset_name}. The \ - location must be either \"AG\" for Above \ - ground, \"OG\" for On Ground or \"BG\" for \ - Bellow Ground (burried) Tanks." - ) + location must be either "AG" for Above \ + ground, "OG" for On Ground or "BG" for \ + Below Ground (buried) Tanks.' + raise ValueError(msg) - tank_anchored_allowable = {int(0), int(1)} + tank_anchored_allowable = {0, 1} if tank_anchored not in tank_anchored_allowable: - raise ValueError( - f"Tank's anchored status = \"{tank_location}\ - \" is not allowable in tank {asset_name}. \ + msg = f'Tank\'s anchored status = "{tank_location}\ + " is not allowable in tank {asset_name}. 
\ The anchored status must be either integer\ - value 0 for unachored, or 1 for anchored" - ) - - if tank_location == "AG" and tank_material == "C": - print( - f"The tank {asset_name} is Above Ground (i.e., AG), but \ - the material type is Concrete (\"C\"). Tank type \"C\" is not \ - defiend for AG tanks. The tank is assumed to be Steel (\"S\")" - ) - tank_material = "S" - - if tank_location == "AG" and tank_material == "W": - print( - f"The tank {asset_name} is Above Ground (i.e., AG), but \ - the material type is Wood (\"W\"). Tank type \"W\" is not \ - defiend for AG tanks. The tank is assumed to be Steel (\"S\")" - ) - tank_material = "S" - - if tank_location == "B" and tank_material == "S": - print( - f"The tank {asset_name} is burried (i.e., B), but the\ - material type is Steel (\"S\"). \ - Tank type \"S\" is not defiend for\ - B tanks. The tank is assumed to be Concrete (\"C\")" - ) - tank_material = "C" - - if tank_location == "B" and tank_material == "W": - print( - f"The tank {asset_name} is burried (i.e., B), but the\ - material type is Wood (\"W\"). Tank type \"W\" is not defiend \ - for B tanks. 
The tank is assumed to be Concrete (\"C\")" - ) - tank_material = "C" + value 0 for unachored, or 1 for anchored' + raise ValueError(msg) + + if tank_location == 'AG' and tank_material == 'C': + tank_material = 'S' + + if tank_location == 'AG' and tank_material == 'W': + tank_material = 'S' + + if tank_location == 'B' and tank_material == 'S': + tank_material = 'C' + + if tank_location == 'B' and tank_material == 'W': + tank_material = 'C' if tank_anchored == 1: # Since anchore status does nto matter, there is no need to @@ -784,37 +708,37 @@ def auto_populate(AIM): (tank_location, tank_material, tank_anchored) ] - CMP = pd.DataFrame( + comp = pd.DataFrame( cur_tank_cmp_line, index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Water", - "Material": tank_material, - "Location": tank_location, - "Anchored": tank_anchored, - "PlanArea": "1", # Sina: does not make sense for water. + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Water', + 'Material': tank_material, + 'Location': tank_location, + 'Anchored': tank_anchored, + 'PlanArea': '1', # Sina: does not make sense for water. 
# Kept it here since itw as also kept here for Transportation }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, } else: print( - f"Water Distribution network element type {wdn_element_type} " - f"is not supported in Hazus Earthquake IM DL method" + f'Water Distribution network element type {wdn_element_type} ' + f'is not supported in Hazus Earthquake IM DL method' ) - DL_ap = None - CMP = None + dl_ap = None + comp = None else: print( - f"AssetType: {assetType} is not supported " - f"in Hazus Earthquake IM DL method" + f'AssetType: {asset_type} is not supported ' + f'in Hazus Earthquake IM DL method' ) - return GI_ap, DL_ap, CMP + return gi_ap, dl_ap, comp diff --git a/pelicun/resources/auto/Hazus_Earthquake_Story.py b/pelicun/resources/auto/Hazus_Earthquake_Story.py index 4b29a3feb..e7f7597f5 100644 --- a/pelicun/resources/auto/Hazus_Earthquake_Story.py +++ b/pelicun/resources/auto/Hazus_Earthquake_Story.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2023 Leland Stanford Junior University # Copyright (c) 2023 The Regents of the University of California @@ -38,15 +37,16 @@ # Adam Zsarnóczay from __future__ import annotations + import pandas as pd -ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'} +ap_design_level = {1940: 'LC', 1975: 'MC', 2100: 'HC'} # ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'} -ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} +ap_design_level_w1 = {0: 'LC', 1975: 'MC', 2100: 'HC'} # ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'} -ap_Occupancy = { +ap_occupancy = { 'Other/Unknown': 'RES3', 'Residential - Single-Family': 'RES1', 'Residential - Town-Home': 'RES3', @@ -70,7 +70,7 @@ } -def story_scale(stories, comp_type): +def story_scale(stories, comp_type): # noqa: C901 if comp_type == 'NSA': if stories == 1: return 1.00 @@ -108,11 +108,7 @@ def story_scale(stories, comp_type): return 2.75 elif 
stories == 5: return 3.00 - elif stories == 6: - return 3.50 - elif stories == 7: - return 3.50 - elif stories == 8: + elif stories in (6, 7, 8): return 3.50 elif stories == 9: return 4.50 @@ -122,9 +118,10 @@ def story_scale(stories, comp_type): return 7.30 else: return 1.0 + return None -def auto_populate(AIM): +def auto_populate(aim): """ Automatically creates a performance model for story EDP-based Hazus EQ analysis. @@ -148,69 +145,66 @@ def auto_populate(AIM): CMP: DataFrame Component assignment - Defines the components (in rows) and their location, direction, and quantity (in columns). - """ + """ # extract the General Information - GI = AIM.get('GeneralInformation', None) + gi = aim.get('GeneralInformation', None) - if GI is None: + if gi is None: # TODO: show an error message pass # initialize the auto-populated GI - GI_ap = GI.copy() + gi_ap = gi.copy() - assetType = AIM["assetType"] - ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"] + asset_type = aim['assetType'] + ground_failure = aim['Applications']['DL']['ApplicationData']['ground_failure'] - if assetType == "Buildings": + if asset_type == 'Buildings': # get the building parameters - bt = GI['StructureType'] # building type + bt = gi['StructureType'] # building type # get the design level - dl = GI.get('DesignLevel', None) + dl = gi.get('DesignLevel', None) if dl is None: # If there is no DesignLevel provided, we assume that the YearBuilt is # available - year_built = GI['YearBuilt'] + year_built = gi['YearBuilt'] - if 'W1' in bt: - DesignL = ap_DesignLevel_W1 - else: - DesignL = ap_DesignLevel + design_l = ap_design_level_w1 if 'W1' in bt else ap_design_level - for year in sorted(DesignL.keys()): + for year in sorted(design_l.keys()): if year_built <= year: - dl = DesignL[year] + dl = design_l[year] break - GI_ap['DesignLevel'] = dl + gi_ap['DesignLevel'] = dl # get the number of stories / height - stories = GI.get('NumberOfStories', None) + stories = 
gi.get('NumberOfStories', None) - FG_S = f'STR.{bt}.{dl}' - FG_NSD = 'NSD' - FG_NSA = f'NSA.{dl}' + fg_s = f'STR.{bt}.{dl}' + fg_nsd = 'NSD' + fg_nsa = f'NSA.{dl}' - CMP = pd.DataFrame( + comp = pd.DataFrame( { - f'{FG_S}': [ + f'{fg_s}': [ 'ea', 'all', '1, 2', f"{story_scale(stories, 'S') / stories / 2.}", 'N/A', ], - f'{FG_NSA}': [ + f'{fg_nsa}': [ 'ea', 'all', 0, f"{story_scale(stories, 'NSA') / stories}", 'N/A', ], - f'{FG_NSD}': [ + f'{fg_nsd}': [ 'ea', 'all', '1, 2', @@ -225,57 +219,57 @@ def auto_populate(AIM): if ground_failure: foundation_type = 'S' - # fmt: off - FG_GF_H = f'GF.H.{foundation_type}' # noqa - FG_GF_V = f'GF.V.{foundation_type}' # noqa - CMP_GF = pd.DataFrame( # noqa - {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'], # noqa - f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa - # fmt: on + FG_GF_H = f'GF.H.{foundation_type}' + FG_GF_V = f'GF.V.{foundation_type}' + CMP_GF = pd.DataFrame( + { + f'{FG_GF_H}': ['ea', 1, 1, 1, 'N/A'], + f'{FG_GF_V}': ['ea', 1, 3, 1, 'N/A'], + }, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T - CMP = pd.concat([CMP, CMP_GF], axis=0) + comp = pd.concat([comp, CMP_GF], axis=0) # get the occupancy class - if GI['OccupancyClass'] in ap_Occupancy.keys(): - ot = ap_Occupancy[GI['OccupancyClass']] + if gi['OccupancyClass'] in ap_occupancy: + occupancy_type = ap_occupancy[gi['OccupancyClass']] else: - ot = GI['OccupancyClass'] + occupancy_type = gi['OccupancyClass'] - plan_area = GI.get('PlanArea', 1.0) + plan_area = gi.get('PlanArea', 1.0) repair_config = { - "ConsequenceDatabase": "Hazus Earthquake - Stories", - "MapApproach": "Automatic", - "DecisionVariables": { - "Cost": True, - "Carbon": False, - "Energy": False, - "Time": False, + 'ConsequenceDatabase': 'Hazus Earthquake - Stories', + 'MapApproach': 'Automatic', + 'DecisionVariables': { + 'Cost': True, + 'Carbon': False, + 'Energy': False, + 'Time': False, }, } - DL_ap = { - 
"Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Earthquake - Stories", - "NumberOfStories": f"{stories}", - "OccupancyType": f"{ot}", - "PlanArea": str(plan_area), + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Earthquake - Stories', + 'NumberOfStories': f'{stories}', + 'OccupancyType': f'{occupancy_type}', + 'PlanArea': str(plan_area), }, - "Damage": {"DamageProcess": "Hazus Earthquake"}, - "Demands": {}, - "Losses": {"Repair": repair_config}, - "Options": { - "NonDirectionalMultipliers": {"ALL": 1.0}, + 'Damage': {'DamageProcess': 'Hazus Earthquake'}, + 'Demands': {}, + 'Losses': {'Repair': repair_config}, + 'Options': { + 'NonDirectionalMultipliers': {'ALL': 1.0}, }, } else: print( - f"AssetType: {assetType} is not supported " - f"in Hazus Earthquake Story-based DL method" + f'AssetType: {asset_type} is not supported ' + f'in Hazus Earthquake Story-based DL method' ) - return GI_ap, DL_ap, CMP + return gi_ap, dl_ap, comp diff --git a/pelicun/settings/default_units.json b/pelicun/settings/default_units.json index fe9a3122f..1b387b4a4 100644 --- a/pelicun/settings/default_units.json +++ b/pelicun/settings/default_units.json @@ -37,7 +37,7 @@ "inchps": 0.0254, "ftps": 0.3048 }, - "accelleration": { + "acceleration": { "mps2": 1.0, "inps2": 0.0254, "inchps2": 0.0254, diff --git a/pelicun/tests/__init__.py b/pelicun/tests/__init__.py index 72c332008..1d9bf2ac7 100644 --- a/pelicun/tests/__init__.py +++ b/pelicun/tests/__init__.py @@ -31,3 +31,5 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . 
+ +"""Pelicun Unit Tests.""" diff --git a/pelicun/tests/basic/data/base/test_parse_units/duplicate.json b/pelicun/tests/basic/data/base/test_parse_units/duplicate.json index 2fcbf47ca..1baa810f2 100644 --- a/pelicun/tests/basic/data/base/test_parse_units/duplicate.json +++ b/pelicun/tests/basic/data/base/test_parse_units/duplicate.json @@ -39,7 +39,7 @@ "inchps": 0.0254, "ftps": 0.3048 }, - "accelleration": { + "acceleration": { "mps2": 1.0, "inps2": 0.0254, "inchps2": 0.0254, diff --git a/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json b/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json index f0c492e9a..70e60e630 100644 --- a/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json +++ b/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json @@ -38,7 +38,7 @@ "inchps": 0.0254, "ftps": 0.3048 }, - "accelleration": { + "acceleration": { "mps2": 1.0, "inps2": 0.0254, "inchps2": 0.0254, diff --git a/pelicun/tests/basic/reset_tests.py b/pelicun/tests/basic/reset_tests.py index 34fcad11d..8c7db171f 100644 --- a/pelicun/tests/basic/reset_tests.py +++ b/pelicun/tests/basic/reset_tests.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,19 +37,18 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file is used to reset all expected test result data. -""" +"""This file is used to reset all expected test result data.""" from __future__ import annotations -import os -import re -import glob + import ast import importlib +import os +import re +from pathlib import Path -def reset_all_test_data(restore=True, purge=False): +def reset_all_test_data(*, restore: bool = True, purge: bool = False) -> None: # noqa: C901 """ Update the expected result pickle files with new results, accepting the values obtained by executing the code as correct from now on. 
@@ -78,7 +76,6 @@ def reset_all_test_data(restore=True, purge=False): Raises ------ - ValueError If the test directory is not found. @@ -88,18 +85,19 @@ def reset_all_test_data(restore=True, purge=False): `pelicun` directory. Dangerous things may happen otherwise. """ - - cwd = os.path.basename(os.getcwd()) + cwd = Path.cwd() if cwd != 'pelicun': - raise OSError( + msg = ( 'Wrong directory. ' 'See the docstring of `reset_all_test_data`. Aborting' ) + raise OSError(msg) # where the test result data are stored - testdir = os.path.join(*('tests', 'data')) - if not os.path.exists(testdir): - raise ValueError('pelicun/tests/basic/data directory not found.') + testdir = Path('tests') / 'data' + if not testdir.exists(): + msg = 'pelicun/tests/basic/data directory not found.' + raise ValueError(msg) # clean up existing test result data # only remove .pcl files that start with `test_` @@ -108,18 +106,15 @@ def reset_all_test_data(restore=True, purge=False): for root, _, files in os.walk('.'): for filename in files: if pattern.match(filename): - full_name = os.path.join(root, filename) - print(f'removing: {full_name}') - file_path = full_name - os.remove(file_path) + (Path(root) / filename).unlink() # generate new data if restore: # get a list of all existing test files and iterate - test_files = glob.glob('tests/*test*.py') + test_files = list(Path('tests').glob('*test*.py')) for test_file in test_files: # open the file and statically parse the code looking for functions - with open(test_file, 'r', encoding='utf-8') as file: + with Path(test_file).open(encoding='utf-8') as file: node = ast.parse(file.read()) functions = [n for n in node.body if isinstance(n, ast.FunctionDef)] # iterate over the functions looking for test_ functions @@ -131,7 +126,7 @@ def reset_all_test_data(restore=True, purge=False): if 'reset' in arguments: # we want to import it and run it with reset=True # construct a module name, like 'tests.test_uq' - module_name = 'tests.' 
+ os.path.basename(test_file).replace( + module_name = 'tests.' + Path(test_file).name.replace( '.py', '' ) # import the module @@ -139,5 +134,4 @@ def reset_all_test_data(restore=True, purge=False): # get the function func = getattr(module, function.name) # run it to reset its expected test output data - print(f'running: {function.name} from {module_name}') func(reset=True) diff --git a/pelicun/tests/basic/test_assessment.py b/pelicun/tests/basic/test_assessment.py index bec35e975..04e1b7822 100644 --- a/pelicun/tests/basic/test_assessment.py +++ b/pelicun/tests/basic/test_assessment.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,27 +37,20 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the assessment module of pelicun. -""" +"""These are unit and integration tests on the assessment module of pelicun.""" from __future__ import annotations + import pytest -from pelicun import assessment -# pylint: disable=missing-function-docstring -# pylint: disable=missing-return-doc, missing-return-type-doc +from pelicun import assessment -def create_assessment_obj(config=None): - if config: - asmt = assessment.Assessment(config) - else: - asmt = assessment.Assessment({}) - return asmt +def create_assessment_obj(config: dict | None = None) -> assessment.Assessment: + return assessment.Assessment(config) if config else assessment.Assessment({}) -def test_Assessment_init(): +def test_Assessment_init() -> None: asmt = create_assessment_obj() # confirm attributes for attribute in ( @@ -78,11 +70,10 @@ def test_Assessment_init(): assert hasattr(asmt, attribute) # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - asmt.my_attribute = 2 + asmt.my_attribute = 2 # type: ignore -def test_assessment_get_default_metadata(): +def 
test_assessment_get_default_metadata() -> None: asmt = create_assessment_obj() data_sources = ( @@ -101,7 +92,7 @@ def test_assessment_get_default_metadata(): asmt.get_default_metadata(data_source) -def test_assessment_calc_unit_scale_factor(): +def test_assessment_calc_unit_scale_factor() -> None: # default unit file asmt = create_assessment_obj() @@ -135,7 +126,7 @@ def test_assessment_calc_unit_scale_factor(): # 1 smoot was 67 inches in 1958. -def test_assessment_scale_factor(): +def test_assessment_scale_factor() -> None: # default unit file asmt = create_assessment_obj() assert asmt.scale_factor('m') == 1.00 @@ -156,5 +147,5 @@ def test_assessment_scale_factor(): assert asmt.scale_factor('m') == 39.3701 # exceptions - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='Unknown unit: helen'): asmt.scale_factor('helen') diff --git a/pelicun/tests/basic/test_asset_model.py b/pelicun/tests/basic/test_asset_model.py index 077087b54..eccde38d0 100644 --- a/pelicun/tests/basic/test_asset_model.py +++ b/pelicun/tests/basic/test_asset_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,38 +37,39 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the asset model of pelicun. 
-""" +"""These are unit and integration tests on the asset model of pelicun.""" from __future__ import annotations + import tempfile from copy import deepcopy -import pytest +from typing import TYPE_CHECKING + import numpy as np import pandas as pd +import pytest + from pelicun import assessment +from pelicun.base import ensure_value from pelicun.tests.basic.test_pelicun_model import TestPelicunModel -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=arguments-renamed -# pylint: disable=missing-return-doc,missing-return-type-doc +if TYPE_CHECKING: + from pelicun.model.asset_model import AssetModel class TestAssetModel(TestPelicunModel): @pytest.fixture - def asset_model(self, assessment_instance): + def asset_model(self, assessment_instance: assessment.Assessment) -> AssetModel: return deepcopy(assessment_instance.asset) - def test_init(self, asset_model): + def test_init_method(self, asset_model: AssetModel) -> None: assert asset_model.log assert asset_model.cmp_marginal_params is None assert asset_model.cmp_units is None assert asset_model._cmp_RVs is None assert asset_model.cmp_sample is None - def test_save_cmp_sample(self, asset_model): + def test_save_cmp_sample(self, asset_model: AssetModel) -> None: asset_model.cmp_sample = pd.DataFrame( { ('component_a', f'{i}', f'{j}', '0'): 8.0 @@ -105,10 +105,10 @@ def test_save_cmp_sample(self, asset_model): # also test loading sample to variables # (but we don't inspect them) - _ = asset_model.save_cmp_sample(save_units=False) - _, _ = asset_model.save_cmp_sample(save_units=True) + asset_model.save_cmp_sample(save_units=False) + asset_model.save_cmp_sample(save_units=True) - def test_load_cmp_model_1(self, asset_model): + def test_load_cmp_model_1(self, asset_model: AssetModel) -> None: cmp_marginals = pd.read_csv( 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv', index_col=0, @@ -135,7 +135,7 @@ def test_load_cmp_model_1(self, asset_model): 
pd.testing.assert_frame_equal( expected_cmp_marginal_params, - asset_model.cmp_marginal_params, + ensure_value(asset_model.cmp_marginal_params), check_index_type=False, check_column_type=False, check_dtype=False, @@ -147,11 +147,11 @@ def test_load_cmp_model_1(self, asset_model): pd.testing.assert_series_equal( expected_cmp_units, - asset_model.cmp_units, + ensure_value(asset_model.cmp_units), check_index_type=False, ) - def test_load_cmp_model_2(self, asset_model): + def test_load_cmp_model_2(self, asset_model: AssetModel) -> None: # component marginals utilizing the keywords '--', 'all', 'top', 'roof' cmp_marginals = pd.read_csv( 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_2.csv', @@ -160,7 +160,7 @@ def test_load_cmp_model_2(self, asset_model): asset_model._asmnt.stories = 4 asset_model.load_cmp_model({'marginals': cmp_marginals}) - assert asset_model.cmp_marginal_params.to_dict() == { + assert ensure_value(asset_model.cmp_marginal_params).to_dict() == { 'Theta_0': { ('component_a', '0', '1', '0'): 1.0, ('component_a', '0', '2', '0'): 1.0, @@ -209,23 +209,25 @@ def test_load_cmp_model_2(self, asset_model): pd.testing.assert_series_equal( expected_cmp_units, - asset_model.cmp_units, + ensure_value(asset_model.cmp_units), check_index_type=False, ) - def test_load_cmp_model_csv(self, asset_model): + def test_load_cmp_model_csv(self, asset_model: AssetModel) -> None: # load by directly specifying the csv file cmp_marginals = 'pelicun/tests/basic/data/model/test_AssetModel/CMP' asset_model.load_cmp_model(cmp_marginals) - def test_load_cmp_model_exceptions(self, asset_model): + def test_load_cmp_model_exceptions(self, asset_model: AssetModel) -> None: cmp_marginals = pd.read_csv( 'pelicun/tests/basic/data/model/test_AssetModel/' 'CMP_marginals_invalid_loc.csv', index_col=0, ) asset_model._asmnt.stories = 4 - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Cannot parse location string: basement' + ): 
asset_model.load_cmp_model({'marginals': cmp_marginals}) cmp_marginals = pd.read_csv( @@ -234,10 +236,12 @@ def test_load_cmp_model_exceptions(self, asset_model): index_col=0, ) asset_model._asmnt.stories = 4 - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Cannot parse direction string: non-directional' + ): asset_model.load_cmp_model({'marginals': cmp_marginals}) - def test_generate_cmp_sample(self, asset_model): + def test_generate_cmp_sample(self, asset_model: AssetModel) -> None: asset_model.cmp_marginal_params = pd.DataFrame( {'Theta_0': (8.0, 8.0, 8.0, 8.0), 'Blocks': (1.0, 1.0, 1.0, 1.0)}, index=pd.MultiIndex.from_tuples( @@ -278,25 +282,27 @@ def test_generate_cmp_sample(self, asset_model): pd.testing.assert_frame_equal( expected_cmp_sample, - asset_model.cmp_sample, + ensure_value(asset_model.cmp_sample), check_index_type=False, check_column_type=False, ) - def test_generate_cmp_sample_exceptions_1(self, asset_model): + def test_generate_cmp_sample_exceptions_1(self, asset_model: AssetModel) -> None: # without marginal parameters - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Model parameters have not been specified' + ): asset_model.generate_cmp_sample(sample_size=10) - def test_generate_cmp_sample_exceptions_2(self, asset_model): + def test_generate_cmp_sample_exceptions_2(self, asset_model: AssetModel) -> None: # without specifying sample size cmp_marginals = pd.read_csv( 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv', index_col=0, ) asset_model.load_cmp_model({'marginals': cmp_marginals}) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='Sample size was not specified'): asset_model.generate_cmp_sample() # but it should work if a demand sample is available - asset_model._asmnt.demand.sample = np.empty(shape=(10, 2)) + asset_model._asmnt.demand.sample = pd.DataFrame(np.empty(shape=(10, 2))) asset_model.generate_cmp_sample() diff --git 
a/pelicun/tests/basic/test_auto.py b/pelicun/tests/basic/test_auto.py index 3a96025d0..a91081cc2 100644 --- a/pelicun/tests/basic/test_auto.py +++ b/pelicun/tests/basic/test_auto.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,22 +37,16 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the auto module of pelicun. - -""" +"""These are unit and integration tests on the auto module of pelicun.""" from __future__ import annotations -from unittest.mock import patch -from unittest.mock import MagicMock -import pytest -from pelicun.auto import auto_populate +from pathlib import Path +from unittest.mock import MagicMock, patch -# pylint: disable=missing-function-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc -# pylint: disable=redefined-outer-name +import pytest +from pelicun.auto import auto_populate # The tests maintain the order of definitions of the `auto.py` file. 
@@ -67,21 +60,21 @@ @pytest.fixture -def setup_valid_config(): +def setup_valid_config() -> dict: return {'GeneralInformation': {'someKey': 'someValue'}} @pytest.fixture -def setup_auto_script_path(): +def setup_auto_script_path() -> str: return 'PelicunDefault/test_script' @pytest.fixture -def setup_expected_base_path(): +def setup_expected_base_path() -> str: return '/expected/path/resources/auto/' -def test_valid_inputs(setup_valid_config, setup_auto_script_path): +def test_valid_inputs(setup_valid_config: dict, setup_auto_script_path: str) -> None: with patch('pelicun.base.pelicun_path', '/expected/path'), patch( 'os.path.exists', return_value=True ), patch('importlib.__import__') as mock_import: @@ -90,23 +83,23 @@ def test_valid_inputs(setup_valid_config, setup_auto_script_path): ) mock_import.return_value.auto_populate = mock_auto_populate_ext - config, cmp = auto_populate(setup_valid_config, setup_auto_script_path) + config, cmp = auto_populate(setup_valid_config, Path(setup_auto_script_path)) assert 'DL' in config assert cmp == 'CMP' -def test_missing_general_information(): - with pytest.raises(ValueError) as excinfo: - auto_populate({}, 'some/path') - assert "No Asset Information provided for the auto-population routine." 
in str( - excinfo.value - ) +def test_missing_general_information() -> None: + with pytest.raises( + ValueError, + match='No Asset Information provided for the auto-population routine.', + ): + auto_populate({}, Path('some/path')) def test_pelicun_default_path_replacement( - setup_auto_script_path, setup_expected_base_path -): + setup_auto_script_path: str, setup_expected_base_path: str +) -> None: modified_path = setup_auto_script_path.replace( 'PelicunDefault/', setup_expected_base_path ) @@ -114,8 +107,8 @@ def test_pelicun_default_path_replacement( def test_auto_population_script_execution( - setup_valid_config, setup_auto_script_path -): + setup_valid_config: dict, setup_auto_script_path: str +) -> None: with patch('pelicun.base.pelicun_path', '/expected/path'), patch( 'os.path.exists', return_value=True ), patch('importlib.__import__') as mock_import: @@ -124,5 +117,5 @@ def test_auto_population_script_execution( ) mock_import.return_value.auto_populate = mock_auto_populate_ext - auto_populate(setup_valid_config, setup_auto_script_path) + auto_populate(setup_valid_config, Path(setup_auto_script_path)) mock_import.assert_called_once() diff --git a/pelicun/tests/basic/test_base.py b/pelicun/tests/basic/test_base.py index fd10514e2..577df1430 100644 --- a/pelicun/tests/basic/test_base.py +++ b/pelicun/tests/basic/test_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,47 +37,47 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the base module of pelicun. 
-""" +"""These are unit and integration tests on the base module of pelicun.""" from __future__ import annotations -import os + +import argparse import io import re import tempfile from contextlib import redirect_stdout -import argparse -import pytest -import pandas as pd +from pathlib import Path + import numpy as np -from pelicun import base +import pandas as pd +import pytest +from pelicun import base +from pelicun.base import ensure_value # The tests maintain the order of definitions of the `base.py` file. -def test_options_init(): - +def test_options_init() -> None: temp_dir = tempfile.mkdtemp() # Create a sample user_config_options dictionary user_config_options = { - "Verbose": False, - "Seed": None, - "LogShowMS": False, - "LogFile": f'{temp_dir}/test_log_file', - "PrintLog": False, - "DemandOffset": {"PFA": -1, "PFV": -1}, - "Sampling": { - "SamplingMethod": "MonteCarlo", - "SampleSize": 1000, - "PreserveRawOrder": False, + 'Verbose': False, + 'Seed': None, + 'LogShowMS': False, + 'LogFile': f'{temp_dir}/test_log_file', + 'PrintLog': False, + 'DemandOffset': {'PFA': -1, 'PFV': -1}, + 'Sampling': { + 'SamplingMethod': 'MonteCarlo', + 'SampleSize': 1000, + 'PreserveRawOrder': False, }, - "SamplingMethod": "MonteCarlo", - "NonDirectionalMultipliers": {"ALL": 1.2}, - "EconomiesOfScale": {"AcrossFloors": True, "AcrossDamageStates": True}, - "RepairCostAndTimeCorrelation": 0.7, + 'SamplingMethod': 'MonteCarlo', + 'NonDirectionalMultipliers': {'ALL': 1.2}, + 'EconomiesOfScale': {'AcrossFloors': True, 'AcrossDamageStates': True}, + 'RepairCostAndTimeCorrelation': 0.7, } # Create an Options object using the user_config_options @@ -95,13 +94,13 @@ def test_options_init(): assert options.demand_offset == {'PFA': -1, 'PFV': -1} assert options.nondir_multi_dict == {'ALL': 1.2} assert options.rho_cost_time == 0.7 - assert options.eco_scale == {"AcrossFloors": True, "AcrossDamageStates": True} + assert options.eco_scale == {'AcrossFloors': True, 'AcrossDamageStates': 
True} # Check that the Logger object attribute of the Options object is # initialized with the correct parameters assert options.log.verbose is False assert options.log.log_show_ms is False - assert os.path.basename(options.log.log_file) == 'test_log_file' + assert Path(ensure_value(options.log.log_file)).name == 'test_log_file' assert options.log.print_log is False # test seed property and setter @@ -109,16 +108,15 @@ def test_options_init(): assert options.seed == 42 # test rng - # pylint: disable=c-extension-no-member assert isinstance(options.rng, np.random._generator.Generator) -def test_nondir_multi(): +def test_nondir_multi() -> None: options = base.Options({'NonDirectionalMultipliers': {'PFA': 1.5, 'PFV': 1.00}}) assert options.nondir_multi_dict == {'PFA': 1.5, 'PFV': 1.0, 'ALL': 1.2} -def test_logger_init(): +def test_logger_init() -> None: # Test that the Logger object is initialized with the correct # attributes based on the input configuration @@ -130,10 +128,10 @@ def test_logger_init(): 'log_file': f'{temp_dir}/log.txt', 'print_log': True, } - log = base.Logger(**log_config) + log = base.Logger(**log_config) # type: ignore assert log.verbose is True assert log.log_show_ms is False - assert os.path.basename(log.log_file) == 'log.txt' + assert Path(ensure_value(log.log_file)).name == 'log.txt' assert log.print_log is True # test exceptions @@ -144,11 +142,10 @@ def test_logger_init(): 'print_log': True, } with pytest.raises((IsADirectoryError, FileExistsError, FileNotFoundError)): - log = base.Logger(**log_config) - + log = base.Logger(**log_config) # type: ignore -def test_logger_msg(): +def test_logger_msg() -> None: temp_dir = tempfile.mkdtemp() # Test that the msg method prints the correct message to the @@ -159,20 +156,20 @@ def test_logger_msg(): 'log_file': f'{temp_dir}/log.txt', 'print_log': True, } - log = base.Logger(**log_config) + log = base.Logger(**log_config) # type: ignore # Check that the message is printed to the console with 
io.StringIO() as buf, redirect_stdout(buf): log.msg('This is a message') output = buf.getvalue() assert 'This is a message' in output # Check that the message is written to the log file - with open(f'{temp_dir}/log.txt', 'r', encoding='utf-8') as f: + with Path(f'{temp_dir}/log.txt').open(encoding='utf-8') as f: assert 'This is a message' in f.read() # Check if timestamp is printed with io.StringIO() as buf, redirect_stdout(buf): log.msg( - ('This is a message\nSecond line'), # noqa + ('This is a message\nSecond line'), prepend_timestamp=True, ) output = buf.getvalue() @@ -180,8 +177,7 @@ def test_logger_msg(): assert re.search(pattern, output) is not None -def test_logger_div(): - +def test_logger_div() -> None: temp_dir = tempfile.mkdtemp() # We test the divider with and without the timestamp @@ -199,7 +195,7 @@ def test_logger_div(): 'log_file': f'{temp_dir}/log.txt', 'print_log': True, } - log = base.Logger(**log_config) + log = base.Logger(**log_config) # type: ignore # check console output with io.StringIO() as buf, redirect_stdout(buf): @@ -207,25 +203,24 @@ def test_logger_div(): output = buf.getvalue() assert pattern.match(output) # check log file - with open(f'{temp_dir}/log.txt', 'r', encoding='utf-8') as f: + with Path(f'{temp_dir}/log.txt').open(encoding='utf-8') as f: # simply check that it is not empty assert f.read() -def test_split_file_name(): - file_path = "example.file.name.txt" +def test_split_file_name() -> None: + file_path = 'example.file.name.txt' name, extension = base.split_file_name(file_path) assert name == 'example.file.name' assert extension == '.txt' - file_path = "example" + file_path = 'example' name, extension = base.split_file_name(file_path) assert name == 'example' - assert extension == '' - + assert extension == '' # noqa: PLC1901 -def test_print_system_info(): +def test_print_system_info() -> None: temp_dir = tempfile.mkdtemp() # create a logger object @@ -235,7 +230,7 @@ def test_print_system_info(): 'log_file': 
f'{temp_dir}/log.txt', 'print_log': True, } - log = base.Logger(**log_config) + log = base.Logger(**log_config) # type: ignore # run print_system_info and get the console output with io.StringIO() as buf, redirect_stdout(buf): @@ -246,7 +241,7 @@ def test_print_system_info(): assert 'System Information:\n' in output -def test_update_vals(): +def test_update_vals() -> None: primary = {'b': {'c': 4, 'd': 5}, 'g': 7} update = {'a': 1, 'b': {'c': 3, 'd': 5}, 'f': 6} base.update_vals(update, primary, 'update', 'primary') @@ -262,18 +257,18 @@ def test_update_vals(): primary = {'a': {'b': 4}} update = {'a': {'b': {'c': 3}}} - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='should not map to a dictionary'): base.update_vals(update, primary, 'update', 'primary') primary = {'a': {'b': 3}} update = {'a': 1, 'b': 2} - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='should map to a dictionary'): base.update_vals(update, primary, 'update', 'primary') -def test_merge_default_config(): +def test_merge_default_config() -> None: # Test merging an empty user config with the default config - user_config = {} + user_config: dict[str, object] | None = {} merged_config = base.merge_default_config(user_config) assert merged_config == base.load_default_options() @@ -302,7 +297,7 @@ def test_merge_default_config(): assert merged_config == {**base.load_default_options(), **user_config} -def test_convert_dtypes(): +def test_convert_dtypes() -> None: # All columns able to be converted # Input DataFrame @@ -368,45 +363,45 @@ def test_convert_dtypes(): ) -def test_convert_to_SimpleIndex(): +def test_convert_to_SimpleIndex() -> None: # Test conversion of a multiindex to a simple index following the # SimCenter dash convention index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd'))) - df = pd.DataFrame([[1, 2], [3, 4]], index=index) - df.index.names = ['name_1', 'name_2'] - df_simple = base.convert_to_SimpleIndex(df, axis=0) - assert 
df_simple.index.tolist() == ['a-b', 'c-d'] - assert df_simple.index.name == '-'.join(df.index.names) + data = pd.DataFrame([[1, 2], [3, 4]], index=index) + data.index.names = ['name_1', 'name_2'] + data_simple = base.convert_to_SimpleIndex(data, axis=0) + assert data_simple.index.tolist() == ['a-b', 'c-d'] + assert data_simple.index.name == '-'.join(data.index.names) # Test inplace modification - df_inplace = df.copy() + df_inplace = data.copy() base.convert_to_SimpleIndex(df_inplace, axis=0, inplace=True) assert df_inplace.index.tolist() == ['a-b', 'c-d'] - assert df_inplace.index.name == '-'.join(df.index.names) + assert df_inplace.index.name == '-'.join(data.index.names) # Test conversion of columns index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd'))) - df = pd.DataFrame([[1, 2], [3, 4]], columns=index) - df.columns.names = ['name_1', 'name_2'] - df_simple = base.convert_to_SimpleIndex(df, axis=1) - assert df_simple.columns.tolist() == ['a-b', 'c-d'] - assert df_simple.columns.name == '-'.join(df.columns.names) + data = pd.DataFrame([[1, 2], [3, 4]], columns=index) + data.columns.names = ['name_1', 'name_2'] + data_simple = base.convert_to_SimpleIndex(data, axis=1) + assert data_simple.columns.tolist() == ['a-b', 'c-d'] + assert data_simple.columns.name == '-'.join(data.columns.names) # Test inplace modification - df_inplace = df.copy() + df_inplace = data.copy() base.convert_to_SimpleIndex(df_inplace, axis=1, inplace=True) assert df_inplace.columns.tolist() == ['a-b', 'c-d'] - assert df_inplace.columns.name == '-'.join(df.columns.names) + assert df_inplace.columns.name == '-'.join(data.columns.names) # Test invalid axis parameter - with pytest.raises(ValueError): - base.convert_to_SimpleIndex(df, axis=2) + with pytest.raises(ValueError, match='Invalid axis parameter: 2'): + base.convert_to_SimpleIndex(data, axis=2) -def test_convert_to_MultiIndex(): +def test_convert_to_MultiIndex() -> None: # Test a case where the index needs to be converted to a 
MultiIndex data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)}) - data.index = ('A-1', 'B-1', 'C-1') + data.index = pd.Index(['A-1', 'B-1', 'C-1']) data_converted = base.convert_to_MultiIndex(data, axis=0, inplace=False) expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1'))) assert data_converted.index.equals(expected_index) @@ -414,8 +409,8 @@ def test_convert_to_MultiIndex(): assert data.index.equals(pd.Index(('A-1', 'B-1', 'C-1'))) # Test a case where the index is already a MultiIndex - data_converted = base.convert_to_MultiIndex( - data_converted, axis=0, inplace=False + data_converted = pd.DataFrame( + base.convert_to_MultiIndex(data_converted, axis=0, inplace=False) ) assert data_converted.index.equals(expected_index) @@ -428,42 +423,39 @@ def test_convert_to_MultiIndex(): assert data.columns.equals(pd.Index(('A-1', 'B-1'))) # Test a case where the columns are already a MultiIndex - data_converted = base.convert_to_MultiIndex( - data_converted, axis=1, inplace=False + data_converted = pd.DataFrame( + base.convert_to_MultiIndex(data_converted, axis=1, inplace=False) ) assert data_converted.columns.equals(expected_columns) # Test an invalid axis parameter - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='Invalid axis parameter: 2'): base.convert_to_MultiIndex(data_converted, axis=2, inplace=False) # inplace=True data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)}) - data.index = ('A-1', 'B-1', 'C-1') + data.index = pd.Index(['A-1', 'B-1', 'C-1']) base.convert_to_MultiIndex(data, axis=0, inplace=True) expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1'))) assert data.index.equals(expected_index) -def test_show_matrix(): +def test_show_matrix() -> None: # Test with a simple 2D array - arr = ((1, 2, 3), (4, 5, 6)) + arr = np.array(((1, 2, 3), (4, 5, 6))) base.show_matrix(arr) - assert True # if no AssertionError is thrown, then the test passes # Test with a DataFrame - df = pd.DataFrame(((1, 
2, 3), (4, 5, 6)), columns=('a', 'b', 'c')) - base.show_matrix(df) - assert True # if no AssertionError is thrown, then the test passes + data = pd.DataFrame(((1, 2, 3), (4, 5, 6)), columns=('a', 'b', 'c')) + base.show_matrix(data) # Test with use_describe=True base.show_matrix(arr, use_describe=True) - assert True # if no AssertionError is thrown, then the test passes -def test_multiply_factor_multiple_levels(): +def test_multiply_factor_multiple_levels() -> None: # Original DataFrame definition - df = pd.DataFrame( + data = pd.DataFrame( np.full((5, 3), 1.00), index=pd.MultiIndex.from_tuples( [ @@ -501,7 +493,7 @@ def test_multiply_factor_multiple_levels(): ), columns=['col1', 'col2', 'col3'], ) - test_df = df.copy() + test_df = data.copy() base.multiply_factor_multiple_levels(test_df, {'lv1': 'A', 'lv2': 'X'}, 2) pd.testing.assert_frame_equal( test_df, @@ -523,7 +515,7 @@ def test_multiply_factor_multiple_levels(): ), columns=['col1', 'col2', 'col3'], ) - test_df = df.copy() + test_df = data.copy() base.multiply_factor_multiple_levels(test_df, {}, 3) pd.testing.assert_frame_equal(test_df, result_df_all) @@ -572,16 +564,14 @@ def test_multiply_factor_multiple_levels(): ) # Test 4: Multiplication with no matching conditions - with pytest.raises(ValueError) as excinfo: - base.multiply_factor_multiple_levels(df.copy(), {'lv1': 'C'}, 2) - assert ( - str(excinfo.value) == "No rows found matching the conditions: `{'lv1': 'C'}`" - ) + with pytest.raises( + ValueError, match="No rows found matching the conditions: `{'lv1': 'C'}`" + ): + base.multiply_factor_multiple_levels(data.copy(), {'lv1': 'C'}, 2) # Test 5: Invalid axis - with pytest.raises(ValueError) as excinfo: - base.multiply_factor_multiple_levels(df.copy(), {'lv1': 'A'}, 2, axis=2) - assert str(excinfo.value) == "Invalid axis: `2`" + with pytest.raises(ValueError, match='Invalid axis: `2`'): + base.multiply_factor_multiple_levels(data.copy(), {'lv1': 'A'}, 2, axis=2) # Test 6: Empty conditions affecting all 
rows result_df_empty = pd.DataFrame( @@ -598,13 +588,13 @@ def test_multiply_factor_multiple_levels(): ), columns=['col1', 'col2', 'col3'], ) - testing_df = df.copy() + testing_df = data.copy() base.multiply_factor_multiple_levels(testing_df, {}, 4) pd.testing.assert_frame_equal(testing_df, result_df_empty) -def test_describe(): - expected_idx = pd.Index( +def test_describe() -> None: + expected_idx: pd.Index = pd.Index( ( 'count', 'mean', @@ -628,10 +618,10 @@ def test_describe(): # case 1: # passing a DataFrame - df = pd.DataFrame( + data = pd.DataFrame( ((1.00, 2.00, 3.00), (4.00, 5.00, 6.00)), columns=['A', 'B', 'C'] ) - desc = base.describe(df) + desc = base.describe(data) assert np.all(desc.index == expected_idx) assert np.all(desc.columns == pd.Index(('A', 'B', 'C'), dtype='object')) @@ -658,7 +648,7 @@ def test_describe(): assert np.all(desc.columns == pd.Index((0,), dtype='object')) -def test_str2bool(): +def test_str2bool() -> None: assert base.str2bool('True') is True assert base.str2bool('False') is False assert base.str2bool('yes') is True @@ -667,21 +657,21 @@ def test_str2bool(): assert base.str2bool('f') is False assert base.str2bool('1') is True assert base.str2bool('0') is False - assert base.str2bool(True) is True - assert base.str2bool(False) is False + assert base.str2bool(v=True) is True + assert base.str2bool(v=False) is False with pytest.raises(argparse.ArgumentTypeError): base.str2bool('In most cases, it depends..') -def test_float_or_None(): +def test_float_or_None() -> None: # Test with a string that can be converted to a float - assert base.float_or_None('3.14') == 3.14 + assert base.float_or_None('123.00') == 123.00 # Test with a string that represents an integer assert base.float_or_None('42') == 42.0 # Test with a string that represents a negative number - assert base.float_or_None('-3.14') == -3.14 + assert base.float_or_None('-123.00') == -123.00 # Test with a string that can't be converted to a float assert 
base.float_or_None('hello') is None @@ -690,7 +680,7 @@ def test_float_or_None(): assert base.float_or_None('') is None -def test_int_or_None(): +def test_int_or_None() -> None: # Test the case when the string can be converted to int assert base.int_or_None('123') == 123 assert base.int_or_None('-456') == -456 @@ -704,8 +694,8 @@ def test_int_or_None(): assert base.int_or_None('') is None -def test_with_parsed_str_na_values(): - df = pd.DataFrame( +def test_with_parsed_str_na_values() -> None: + data = pd.DataFrame( { 'A': [1.00, 2.00, 'N/A', 4.00, 5.00], 'B': ['foo', 'bar', 'NA', 'baz', 'qux'], @@ -713,7 +703,7 @@ def test_with_parsed_str_na_values(): } ) - res = base.with_parsed_str_na_values(df) + res = base.with_parsed_str_na_values(data) pd.testing.assert_frame_equal( res, pd.DataFrame( @@ -726,17 +716,17 @@ def test_with_parsed_str_na_values(): ) -def test_run_input_specs(): - assert os.path.basename(base.pelicun_path) == 'pelicun' +def test_run_input_specs() -> None: + assert Path(base.pelicun_path).name == 'pelicun' -def test_dedupe_index(): +def test_dedupe_index() -> None: tuples = [('A', '1'), ('A', '1'), ('B', '2'), ('B', '3')] index = pd.MultiIndex.from_tuples(tuples, names=['L1', 'L2']) data = np.full((4, 1), 0.00) - df = pd.DataFrame(data, index=index) - df = base.dedupe_index(df) - assert df.to_dict() == { + data_pd = pd.DataFrame(data, index=index) + data_pd = base.dedupe_index(data_pd) + assert data_pd.to_dict() == { 0: { ('A', '1', '0'): 0.0, ('A', '1', '1'): 0.0, @@ -746,90 +736,90 @@ def test_dedupe_index(): } -def test_dict_raise_on_duplicates(): +def test_dict_raise_on_duplicates() -> None: res = base.dict_raise_on_duplicates([('A', '1'), ('B', '2')]) assert res == {'A': '1', 'B': '2'} - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='duplicate key: A'): base.dict_raise_on_duplicates([('A', '1'), ('A', '2')]) -def test_parse_units(): +def test_parse_units() -> None: # Test the default units are parsed correctly units = 
base.parse_units() assert isinstance(units, dict) expect = { - "sec": 1.0, - "minute": 60.0, - "hour": 3600.0, - "day": 86400.0, - "m": 1.0, - "mm": 0.001, - "cm": 0.01, - "km": 1000.0, - "in": 0.0254, - "inch": 0.0254, - "ft": 0.3048, - "mile": 1609.344, - "m2": 1.0, - "mm2": 1e-06, - "cm2": 0.0001, - "km2": 1000000.0, - "in2": 0.00064516, - "inch2": 0.00064516, - "ft2": 0.09290304, - "mile2": 2589988.110336, - "m3": 1.0, - "in3": 1.6387064e-05, - "inch3": 1.6387064e-05, - "ft3": 0.028316846592, - "cmps": 0.01, - "mps": 1.0, - "mph": 0.44704, - "inps": 0.0254, - "inchps": 0.0254, - "ftps": 0.3048, - "mps2": 1.0, - "inps2": 0.0254, - "inchps2": 0.0254, - "ftps2": 0.3048, - "g": 9.80665, - "kg": 1.0, - "ton": 1000.0, - "lb": 0.453592, - "N": 1.0, - "kN": 1000.0, - "lbf": 4.4482179868, - "kip": 4448.2179868, - "kips": 4448.2179868, - "Pa": 1.0, - "kPa": 1000.0, - "MPa": 1000000.0, - "GPa": 1000000000.0, - "psi": 6894.751669043338, - "ksi": 6894751.669043338, - "Mpsi": 6894751669.043338, - "A": 1.0, - "V": 1.0, - "kV": 1000.0, - "ea": 1.0, - "unitless": 1.0, - "rad": 1.0, - "C": 1.0, - "USD_2011": 1.0, - "USD": 1.0, - "loss_ratio": 1.0, - "worker_day": 1.0, - "EA": 1.0, - "SF": 0.09290304, - "LF": 0.3048, - "TN": 1000.0, - "AP": 1.0, - "CF": 0.0004719474432, - "KV": 1000.0, - "J": 1.0, - "MJ": 1000000.0, - "test_two": 2.00, - "test_three": 3.00, + 'sec': 1.0, + 'minute': 60.0, + 'hour': 3600.0, + 'day': 86400.0, + 'm': 1.0, + 'mm': 0.001, + 'cm': 0.01, + 'km': 1000.0, + 'in': 0.0254, + 'inch': 0.0254, + 'ft': 0.3048, + 'mile': 1609.344, + 'm2': 1.0, + 'mm2': 1e-06, + 'cm2': 0.0001, + 'km2': 1000000.0, + 'in2': 0.00064516, + 'inch2': 0.00064516, + 'ft2': 0.09290304, + 'mile2': 2589988.110336, + 'm3': 1.0, + 'in3': 1.6387064e-05, + 'inch3': 1.6387064e-05, + 'ft3': 0.028316846592, + 'cmps': 0.01, + 'mps': 1.0, + 'mph': 0.44704, + 'inps': 0.0254, + 'inchps': 0.0254, + 'ftps': 0.3048, + 'mps2': 1.0, + 'inps2': 0.0254, + 'inchps2': 0.0254, + 'ftps2': 0.3048, + 'g': 9.80665, 
+ 'kg': 1.0, + 'ton': 1000.0, + 'lb': 0.453592, + 'N': 1.0, + 'kN': 1000.0, + 'lbf': 4.4482179868, + 'kip': 4448.2179868, + 'kips': 4448.2179868, + 'Pa': 1.0, + 'kPa': 1000.0, + 'MPa': 1000000.0, + 'GPa': 1000000000.0, + 'psi': 6894.751669043338, + 'ksi': 6894751.669043338, + 'Mpsi': 6894751669.043338, + 'A': 1.0, + 'V': 1.0, + 'kV': 1000.0, + 'ea': 1.0, + 'unitless': 1.0, + 'rad': 1.0, + 'C': 1.0, + 'USD_2011': 1.0, + 'USD': 1.0, + 'loss_ratio': 1.0, + 'worker_day': 1.0, + 'EA': 1.0, + 'SF': 0.09290304, + 'LF': 0.3048, + 'TN': 1000.0, + 'AP': 1.0, + 'CF': 0.0004719474432, + 'KV': 1000.0, + 'J': 1.0, + 'MJ': 1000000.0, + 'test_two': 2.00, + 'test_three': 3.00, } for thing, value in units.items(): assert thing in expect @@ -851,7 +841,10 @@ def test_parse_units(): # Test that an exception is raised if the additional units file is # not a valid JSON file invalid_json_file = 'pelicun/tests/basic/data/base/test_parse_units/invalid.json' - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='not a valid JSON file.', + ): units = base.parse_units(invalid_json_file) # Test that an exception is raised if a unit is defined twice in @@ -859,7 +852,10 @@ def test_parse_units(): duplicate_units_file = ( 'pelicun/tests/basic/data/base/test_parse_units/duplicate2.json' ) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='sec defined twice', + ): units = base.parse_units(duplicate_units_file) # Test that an exception is raised if a unit conversion factor is not a float @@ -874,11 +870,14 @@ def test_parse_units(): invalid_units_file = ( 'pelicun/tests/basic/data/base/test_parse_units/not_dict.json' ) - with pytest.raises(ValueError): + with pytest.raises( + (ValueError, TypeError), + match="contains first-level keys that don't point to a dictionary", + ): units = base.parse_units(invalid_units_file) -def test_unit_conversion(): +def test_unit_conversion() -> None: # Test scalar conversion from feet to meters assert 
base.convert_units(1.00, 'ft', 'm') == 0.3048 @@ -901,46 +900,44 @@ def test_unit_conversion(): # Test error handling for invalid input type with pytest.raises(TypeError) as excinfo: - base.convert_units("one", 'ft', 'm') + base.convert_units('one', 'ft', 'm') # type: ignore assert str(excinfo.value) == 'Invalid input type for `values`' # Test error handling for unknown unit - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match='Unknown unit `xyz`'): base.convert_units(1.00, 'xyz', 'm') - assert str(excinfo.value) == 'Unknown unit `xyz`' # Test error handling for mismatched category - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match='Unknown unit: `ft`'): base.convert_units(1.00, 'ft', 'm', category='volume') - assert str(excinfo.value) == 'Unknown unit: `ft`' # Test error handling unknown category - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match='Unknown category: `unknown_category`'): base.convert_units(1.00, 'ft', 'm', category='unknown_category') - assert str(excinfo.value) == 'Unknown category: `unknown_category`' # Test error handling different categories - with pytest.raises(ValueError) as excinfo: + with pytest.raises( + ValueError, + match='`lb` is a `mass` unit, but `m` is not specified in that category.', + ): base.convert_units(1.00, 'lb', 'm') - assert ( - str(excinfo.value) - == '`lb` is a `mass` unit, but `m` is not specified in that category.' 
- ) -def test_stringterpolation(): +def test_stringterpolation() -> None: func = base.stringterpolation('1,2,3|4,5,6') x_new = np.array([4, 4.5, 5]) expected = np.array([1, 1.5, 2]) np.testing.assert_array_almost_equal(func(x_new), expected) -def test_invert_mapping(): +def test_invert_mapping() -> None: original_dict = {'a': [1, 2], 'b': [3]} expected = {1: 'a', 2: 'a', 3: 'b'} assert base.invert_mapping(original_dict) == expected # with duplicates, raises an error original_dict = {'a': [1, 2], 'b': [2]} - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Cannot invert mapping with duplicate values.' + ): base.invert_mapping(original_dict) diff --git a/pelicun/tests/basic/test_damage_model.py b/pelicun/tests/basic/test_damage_model.py index 1e991408f..a2bd9fe88 100644 --- a/pelicun/tests/basic/test_damage_model.py +++ b/pelicun/tests/basic/test_damage_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,55 +37,56 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the damage model of pelicun. 
-""" +"""These are unit and integration tests on the damage model of pelicun.""" from __future__ import annotations -from copy import deepcopy + import warnings -import pytest +from copy import deepcopy +from typing import TYPE_CHECKING + import numpy as np import pandas as pd -from pelicun import base -from pelicun import uq -from pelicun.model.damage_model import DamageModel -from pelicun.model.damage_model import DamageModel_Base -from pelicun.model.damage_model import DamageModel_DS -from pelicun.model.damage_model import _is_for_ds_model +import pytest + +from pelicun import base, uq +from pelicun.base import ensure_value +from pelicun.model.damage_model import ( + DamageModel, + DamageModel_Base, + DamageModel_DS, + _is_for_ds_model, +) +from pelicun.pelicun_warnings import PelicunWarning from pelicun.tests.basic.test_pelicun_model import TestPelicunModel -from pelicun.warnings import PelicunWarning -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc +if TYPE_CHECKING: + from pelicun.assessment import Assessment class TestDamageModel(TestPelicunModel): - @pytest.fixture - def damage_model(self, assessment_instance): + def damage_model(self, assessment_instance: Assessment) -> DamageModel: return deepcopy(assessment_instance.damage) - def test___init__(self, damage_model): + def test___init__(self, damage_model: DamageModel) -> None: assert damage_model.log assert damage_model.ds_model with pytest.raises(AttributeError): - damage_model.xyz = 123 + damage_model.xyz = 123 # type: ignore assert damage_model.ds_model.damage_params is None assert damage_model.ds_model.sample is None assert len(damage_model._damage_models) == 1 - def test_damage_models(self, assessment_instance): - + def test_damage_models(self, assessment_instance: Assessment) -> None: damage_model = DamageModel(assessment_instance) assert damage_model._damage_models is not None assert 
len(damage_model._damage_models) == 1 assert isinstance(damage_model._damage_models[0], DamageModel_DS) - def test_load_model_parameters(self, damage_model): + def test_load_model_parameters(self, damage_model: DamageModel) -> None: path = ( 'pelicun/tests/basic/data/model/test_DamageModel/' 'load_model_parameters/damage_db.csv' @@ -100,11 +100,12 @@ def test_load_model_parameters(self, damage_model): damage_model.load_model_parameters([path], cmp_set, warn_missing=True) assert len(w) == 1 assert ( - "The damage model does not provide damage information " - "for the following component(s) in the asset model: " + 'The damage model does not provide damage information ' + 'for the following component(s) in the asset model: ' "['component.incomplete']." ) in str(w[0].message) damage_parameters = damage_model.ds_model.damage_params + assert damage_parameters is not None assert 'component.A' in damage_parameters.index assert 'component.B' in damage_parameters.index assert 'component.C' not in damage_parameters.index @@ -127,29 +128,28 @@ def test_load_model_parameters(self, damage_model): damage_model.load_model_parameters([path], cmp_set, warn_missing=True) assert len(w) == 1 assert ( - "The damage model does not provide damage " - "information for the following component(s) " + 'The damage model does not provide damage ' + 'information for the following component(s) ' "in the asset model: ['not.exist']." ) in str(w[0].message) - assert damage_model.ds_model.damage_params.empty + assert ensure_value(damage_model.ds_model.damage_params).empty - def test_calculate(self): + def test_calculate(self) -> None: # User-facing methods are coupled with other assessment objects # and are tested in the verification examples. pass - def test_save_sample(self): + def test_save_sample(self) -> None: # User-facing methods are coupled with other assessment objects # and are tested in the verification examples. 
pass - def test_load_sample(self): + def test_load_sample(self) -> None: # User-facing methods are coupled with other assessment objects # and are tested in the verification examples. pass - def test__get_component_id_set(self, assessment_instance): - + def test__get_component_id_set(self, assessment_instance: Assessment) -> None: damage_model = DamageModel(assessment_instance) damage_model.ds_model.damage_params = pd.DataFrame( @@ -166,8 +166,9 @@ def test__get_component_id_set(self, assessment_instance): assert component_id_set == expected_set - def test__ensure_damage_parameter_availability(self, assessment_instance): - + def test__ensure_damage_parameter_availability( + self, assessment_instance: Assessment + ) -> None: damage_model = DamageModel(assessment_instance) damage_model.ds_model.damage_params = pd.DataFrame( @@ -178,29 +179,26 @@ def test__ensure_damage_parameter_availability(self, assessment_instance): index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3'], name='ID'), ) - cmp_list = ['cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'] + cmp_set = {'cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'} expected_missing_components = ['cmp.4'] with pytest.warns(PelicunWarning) as record: missing_components = damage_model._ensure_damage_parameter_availability( - cmp_list, warn_missing=True + cmp_set, warn_missing=True ) assert missing_components == expected_missing_components assert len(record) == 1 - assert "cmp.4" in str(record[0].message) + assert 'cmp.4' in str(record[0].message) class TestDamageModel_Base(TestPelicunModel): - def test___init__(self, assessment_instance): - + def test___init__(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_Base(assessment_instance) with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - damage_model.xyz = 123 - - def test__load_model_parameters(self, assessment_instance): + damage_model.xyz = 123 # type: ignore + def test__load_model_parameters(self, assessment_instance: Assessment) -> None: damage_model = 
DamageModel_Base(assessment_instance) damage_model.damage_params = pd.DataFrame( @@ -221,7 +219,7 @@ def test__load_model_parameters(self, assessment_instance): index=pd.Index(['cmp.1', 'cmp.3'], name='ID'), ) - damage_model._load_model_parameters(new_data) + damage_model.load_model_parameters(new_data) pd.testing.assert_frame_equal( damage_model.damage_params, @@ -234,12 +232,13 @@ def test__load_model_parameters(self, assessment_instance): ), ) - def test__convert_damage_parameter_units(self, assessment_instance): - + def test__convert_damage_parameter_units( + self, assessment_instance: Assessment + ) -> None: damage_model = DamageModel_Base(assessment_instance) # should have no effect when damage_params is None - damage_model._convert_damage_parameter_units() + damage_model.convert_damage_parameter_units() # converting units from 'g' to 'm/s2' (1g ~ 9.80665 m/s2) @@ -251,7 +250,7 @@ def test__convert_damage_parameter_units(self, assessment_instance): index=pd.Index(['cmp.1', 'cmp.2'], name='ID'), ) - damage_model._convert_damage_parameter_units() + damage_model.convert_damage_parameter_units() pd.testing.assert_frame_equal( damage_model.damage_params, @@ -266,13 +265,14 @@ def test__convert_damage_parameter_units(self, assessment_instance): ), ) - def test__remove_incomplete_components(self, assessment_instance): - + def test__remove_incomplete_components( + self, assessment_instance: Assessment + ) -> None: damage_model = DamageModel_Base(assessment_instance) # with damage_model.damage_params set to None this should have # no effect. 
- damage_model._remove_incomplete_components() + damage_model.remove_incomplete_components() damage_model.damage_params = pd.DataFrame( { @@ -282,7 +282,7 @@ def test__remove_incomplete_components(self, assessment_instance): index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'], name='ID'), ) - damage_model._remove_incomplete_components() + damage_model.remove_incomplete_components() pd.testing.assert_frame_equal( damage_model.damage_params, @@ -298,14 +298,17 @@ def test__remove_incomplete_components(self, assessment_instance): # with damage_model.damage_params set to None this should have # no effect. - damage_model.damage_params.drop(('Incomplete', ''), axis=1, inplace=True) + damage_model.damage_params = damage_model.damage_params.drop( + ('Incomplete', ''), axis=1 + ) # now, this should also have no effect before = damage_model.damage_params.copy() - damage_model._remove_incomplete_components() + damage_model.remove_incomplete_components() pd.testing.assert_frame_equal(before, damage_model.damage_params) - def test__drop_unused_damage_parameters(self, assessment_instance): - + def test__drop_unused_damage_parameters( + self, assessment_instance: Assessment + ) -> None: damage_model = DamageModel_Base(assessment_instance) damage_model.damage_params = pd.DataFrame( @@ -314,15 +317,14 @@ def test__drop_unused_damage_parameters(self, assessment_instance): cmp_set = {'cmp.1', 'cmp.3'} - damage_model._drop_unused_damage_parameters(cmp_set) + damage_model.drop_unused_damage_parameters(cmp_set) pd.testing.assert_frame_equal( damage_model.damage_params, pd.DataFrame(index=pd.Index(['cmp.1', 'cmp.3'], name='ID')), ) - def test__get_pg_batches(self, assessment_instance): - + def test__get_pg_batches(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_Base(assessment_instance) component_blocks = pd.DataFrame( @@ -370,9 +372,7 @@ def test__get_pg_batches(self, assessment_instance): class TestDamageModel_DS(TestDamageModel_Base): - - def 
test__obtain_ds_sample(self, assessment_instance): - + def test__obtain_ds_sample(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_DS(assessment_instance) demand_sample = pd.DataFrame( @@ -417,7 +417,7 @@ def test__obtain_ds_sample(self, assessment_instance): index=['cmp.1', 'cmp.2', 'cmp.3'], ).rename_axis('ID') - damage_model._obtain_ds_sample( + damage_model.obtain_ds_sample( demand_sample, component_blocks, block_batch_size, @@ -426,7 +426,7 @@ def test__obtain_ds_sample(self, assessment_instance): nondirectional_multipliers, ) pd.testing.assert_frame_equal( - damage_model.ds_sample, + ensure_value(damage_model.ds_sample), pd.DataFrame( { ('cmp.1', '1', '1', '1', '1'): [1, 1], @@ -438,8 +438,7 @@ def test__obtain_ds_sample(self, assessment_instance): ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block']), ) - def test__handle_operation(self, assessment_instance): - + def test__handle_operation(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_DS(assessment_instance) assert damage_model._handle_operation(1.00, '+', 1.00) == 2.00 @@ -447,16 +446,14 @@ def test__handle_operation(self, assessment_instance): assert damage_model._handle_operation(1.00, '*', 4.00) == 4.00 assert damage_model._handle_operation(8.00, '/', 8.00) == 1.00 - with pytest.raises(ValueError) as record: + with pytest.raises(ValueError, match='Invalid operation: `%`'): damage_model._handle_operation(1.00, '%', 1.00) - assert ('Invalid operation: `%`') in str(record.value) - - def test__generate_dmg_sample(self, assessment_instance): + def test__generate_dmg_sample(self, assessment_instance: Assessment) -> None: # Create an instance of the damage model damage_model = DamageModel_DS(assessment_instance) - PGB = pd.DataFrame( + pgb = pd.DataFrame( {'Blocks': [1]}, index=pd.MultiIndex.from_tuples( [('cmp.test', '1', '2', '3')], @@ -484,7 +481,7 @@ def test__generate_dmg_sample(self, assessment_instance): sample_size = 2 capacity_sample, 
lsds_sample = damage_model._generate_dmg_sample( - sample_size, PGB, scaling_specification + sample_size, pgb, scaling_specification ) pd.testing.assert_frame_equal( @@ -506,11 +503,10 @@ def test__generate_dmg_sample(self, assessment_instance): ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block', 'ls']), ) - def test__create_dmg_RVs(self, assessment_instance): - + def test__create_dmg_RVs(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_DS(assessment_instance) - PGB = pd.DataFrame( + pgb = pd.DataFrame( {'Blocks': [1]}, index=pd.MultiIndex.from_tuples( [ @@ -543,8 +539,8 @@ def test__create_dmg_RVs(self, assessment_instance): scaling_specification = {'cmp.A-1-2': '*1.20'} # Execute the method under test - capacity_RV_reg, lsds_RV_reg = damage_model._create_dmg_RVs( - PGB, scaling_specification + capacity_rv_reg, lsds_rv_reg = damage_model._create_dmg_RVs( + pgb, scaling_specification ) # Now we need to verify the outputs in the registries @@ -552,20 +548,19 @@ def test__create_dmg_RVs(self, assessment_instance): # created correctly. # Example check for presence and properties of a # RandomVariable in the registry: - assert 'FRG-cmp.A-1-2-3-1-1' in capacity_RV_reg.RV + assert 'FRG-cmp.A-1-2-3-1-1' in capacity_rv_reg.RV assert isinstance( - capacity_RV_reg.RV['FRG-cmp.A-1-2-3-1-1'], + capacity_rv_reg.RV['FRG-cmp.A-1-2-3-1-1'], uq.LogNormalRandomVariable, ) - assert 'LSDS-cmp.A-1-2-3-1-1' in lsds_RV_reg.RV + assert 'LSDS-cmp.A-1-2-3-1-1' in lsds_rv_reg.RV assert isinstance( - lsds_RV_reg.RV['LSDS-cmp.A-1-2-3-1-1'], + lsds_rv_reg.RV['LSDS-cmp.A-1-2-3-1-1'], uq.MultinomialRandomVariable, ) - def test__evaluate_damage_state(self, assessment_instance): - + def test__evaluate_damage_state(self, assessment_instance: Assessment) -> None: # We define a single component with 3 limit states. # The last limit state can have two damage states, DS3 and DS4. # We test that the damage state assignments are correct. 
@@ -609,8 +604,7 @@ def test__evaluate_damage_state(self, assessment_instance): ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block']), ) - def test__prepare_dmg_quantities(self, assessment_instance): - + def test__prepare_dmg_quantities(self, assessment_instance: Assessment) -> None: # # A case with blocks # @@ -638,10 +632,10 @@ def test__prepare_dmg_quantities(self, assessment_instance): index=pd.MultiIndex.from_tuples([('A', '0', '1', '0')]), ).rename_axis(index=['cmp', 'loc', 'dir', 'uid']) - res = damage_model._prepare_dmg_quantities( + res = damage_model.prepare_dmg_quantities( component_sample, component_marginal_parameters, - True, + dropzero=True, ) # Each block takes half the quantity. @@ -678,10 +672,10 @@ def test__prepare_dmg_quantities(self, assessment_instance): }, ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - res = damage_model._prepare_dmg_quantities( + res = damage_model.prepare_dmg_quantities( component_sample, None, - True, + dropzero=True, ) # Realization 0: Expect NaNs @@ -719,10 +713,10 @@ def test__prepare_dmg_quantities(self, assessment_instance): }, ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - res = damage_model._prepare_dmg_quantities( + res = damage_model.prepare_dmg_quantities( component_sample, None, - True, + dropzero=True, ) pd.testing.assert_frame_equal( @@ -734,10 +728,10 @@ def test__prepare_dmg_quantities(self, assessment_instance): ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']), ) - res = damage_model._prepare_dmg_quantities( + res = damage_model.prepare_dmg_quantities( component_sample, None, - False, + dropzero=False, ) pd.testing.assert_frame_equal( @@ -751,8 +745,7 @@ def test__prepare_dmg_quantities(self, assessment_instance): ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']), ) - def test__perform_dmg_task(self, assessment_instance): - + def test__perform_dmg_task(self, assessment_instance: Assessment) -> None: # noqa: C901 damage_model = DamageModel_DS(assessment_instance) # @@ 
-769,9 +762,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.B": {"DS1": "CMP.A_DS4"}} + dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_DS4'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -800,9 +793,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.B": {"DS1": "CMP.A_NA"}} + dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_NA'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -833,9 +826,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.B-LOC": {"DS1": "CMP.A_DS4"}} + dmg_process = {'1_CMP.B-LOC': {'DS1': 'CMP.A_DS4'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -867,9 +860,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.A": {"DS1": "ALL_DS2"}} + dmg_process = {'1_CMP.A': {'DS1': 'ALL_DS2'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -900,9 +893,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.B": {"DS1": "CMP.A_NA"}} + dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_NA'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) 
pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -931,9 +924,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.B-LOC": {"DS1": "CMP.A_NA"}} + dmg_process = {'1_CMP.B-LOC': {'DS1': 'CMP.A_NA'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -964,9 +957,9 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.A-LOC": {"DS1": "ALL_NA"}} + dmg_process = {'1_CMP.A-LOC': {'DS1': 'ALL_NA'}} for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) pd.testing.assert_frame_equal( damage_model.ds_sample, @@ -994,10 +987,10 @@ def test__perform_dmg_task(self, assessment_instance): dtype='int32', ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']) - dmg_process = {"1_CMP.C": {"DS1": "CMP.A_DS4"}} + dmg_process = {'1_CMP.C': {'DS1': 'CMP.A_DS4'}} with pytest.warns(PelicunWarning) as record: for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) assert ( 'Source component `CMP.C` in the prescribed damage process not found' ) in str(record.list[0].message) @@ -1005,10 +998,10 @@ def test__perform_dmg_task(self, assessment_instance): # # Test warnings: Target component not found # - dmg_process = {"1_CMP.A": {"DS1": "CMP.C_DS4"}} + dmg_process = {'1_CMP.A': {'DS1': 'CMP.C_DS4'}} with pytest.warns(PelicunWarning) as record: for task in dmg_process.items(): - damage_model._perform_dmg_task(task) + damage_model.perform_dmg_task(task) assert ( 'Target component `CMP.C` in the prescribed damage process not found' ) in str(record.list[0].message) @@ -1016,23 +1009,22 @@ def test__perform_dmg_task(self, assessment_instance): # # Test Error: Unable to parse source 
event # - dmg_process = {"1_CMP.A": {"XYZ": "CMP.B_DS1"}} - with pytest.raises(ValueError) as record: - for task in dmg_process.items(): - damage_model._perform_dmg_task(task) - assert ('Unable to parse source event in damage process: `XYZ`') in str( - record.value - ) - dmg_process = {"1_CMP.A": {"DS1": "CMP.B_ABC"}} - with pytest.raises(ValueError) as record: - for task in dmg_process.items(): - damage_model._perform_dmg_task(task) - assert ('Unable to parse target event in damage process: `ABC`') in str( - record.value - ) - - def test__complete_ds_cols(self, assessment_instance): + dmg_process = {'1_CMP.A': {'XYZ': 'CMP.B_DS1'}} + for task in dmg_process.items(): + with pytest.raises( + ValueError, + match='Unable to parse source event in damage process: `XYZ`', + ): + damage_model.perform_dmg_task(task) + dmg_process = {'1_CMP.A': {'DS1': 'CMP.B_ABC'}} + for task in dmg_process.items(): + with pytest.raises( + ValueError, + match='Unable to parse target event in damage process: `ABC`', + ): + damage_model.perform_dmg_task(task) + def test__complete_ds_cols(self, assessment_instance: Assessment) -> None: damage_model = DamageModel_DS(assessment_instance) # the method needs damage parameters damage_model.damage_params = base.convert_to_MultiIndex( @@ -1053,7 +1045,7 @@ def test__complete_ds_cols(self, assessment_instance): ('single.ds', '0', '0', '0', '1'): [100.00], }, ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']) - out = damage_model._complete_ds_cols(dmg_sample) + out = damage_model.complete_ds_cols(dmg_sample) pd.testing.assert_frame_equal( out, pd.DataFrame( @@ -1069,8 +1061,7 @@ def test__complete_ds_cols(self, assessment_instance): ) -def test__is_for_ds_model(): - +def test__is_for_ds_model() -> None: data_with_ls1 = pd.DataFrame( { ('LS1', 'Theta_0'): [0.5], diff --git a/pelicun/tests/basic/test_demand_model.py b/pelicun/tests/basic/test_demand_model.py index 25e0332a3..2d4be8f12 100644 --- a/pelicun/tests/basic/test_demand_model.py +++ 
b/pelicun/tests/basic/test_demand_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,89 +37,113 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the demand model of pelicun. -""" +"""These are unit and integration tests on the demand model of pelicun.""" from __future__ import annotations -from collections import defaultdict -import os + import tempfile import warnings +from collections import defaultdict from copy import deepcopy -import pytest +from pathlib import Path +from typing import TYPE_CHECKING + import numpy as np import pandas as pd +import pytest + +from pelicun.base import ensure_value +from pelicun.model.demand_model import ( + DemandModel, + _assemble_required_demand_data, + _get_required_demand_type, +) from pelicun.tests.basic.test_model import TestModelModule -from pelicun.model.demand_model import _get_required_demand_type -from pelicun.model.demand_model import _assemble_required_demand_data -# pylint: disable=unused-argument -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc +if TYPE_CHECKING: + from pelicun.assessment import Assessment -class TestDemandModel(TestModelModule): +class TestDemandModel(TestModelModule): # noqa: PLR0904 @pytest.fixture - def demand_model(self, assessment_instance): + def demand_model(self, assessment_instance: Assessment) -> DemandModel: return deepcopy(assessment_instance.demand) @pytest.fixture - def demand_model_with_sample(self, assessment_instance): + def demand_model_with_sample( + self, assessment_instance: Assessment + ) -> DemandModel: mdl = assessment_instance.demand mdl.load_sample( 'pelicun/tests/basic/data/model/' 'test_DemandModel/load_sample/demand_sample_A.csv' ) - return deepcopy(mdl) + model_copy = deepcopy(mdl) + assert 
isinstance(model_copy, DemandModel) + return model_copy @pytest.fixture - def calibrated_demand_model(self, demand_model_with_sample): + def calibrated_demand_model( + self, demand_model_with_sample: DemandModel + ) -> DemandModel: config = { - "ALL": { - "DistributionFamily": "normal", - "AddUncertainty": 0.00, + 'ALL': { + 'DistributionFamily': 'normal', + 'AddUncertainty': 0.00, }, - "PID": { - "DistributionFamily": "lognormal", - "TruncateUpper": "0.06", + 'PID': { + 'DistributionFamily': 'lognormal', + 'TruncateUpper': '0.06', }, - "SA": { - "DistributionFamily": "empirical", + 'SA': { + 'DistributionFamily': 'empirical', }, } demand_model_with_sample.calibrate_model(config) - return deepcopy(demand_model_with_sample) + model_copy = deepcopy(demand_model_with_sample) + assert isinstance(model_copy, DemandModel) + return model_copy @pytest.fixture - def demand_model_with_sample_B(self, assessment_instance): + def demand_model_with_sample_b( + self, assessment_instance: Assessment + ) -> DemandModel: mdl = assessment_instance.demand mdl.load_sample( 'pelicun/tests/basic/data/model/' 'test_DemandModel/load_sample/demand_sample_B.csv' ) - return deepcopy(mdl) + model_copy = deepcopy(mdl) + assert isinstance(model_copy, DemandModel) + return model_copy @pytest.fixture - def demand_model_with_sample_C(self, assessment_instance): + def demand_model_with_sample_c( + self, assessment_instance: Assessment + ) -> DemandModel: mdl = assessment_instance.demand mdl.load_sample( 'pelicun/tests/basic/data/model/' 'test_DemandModel/load_sample/demand_sample_C.csv' ) - return deepcopy(mdl) + model_copy = deepcopy(mdl) + assert isinstance(model_copy, DemandModel) + return model_copy @pytest.fixture - def demand_model_with_sample_D(self, assessment_instance): + def demand_model_with_sample_d( + self, assessment_instance: Assessment + ) -> DemandModel: mdl = assessment_instance.demand mdl.load_sample( 'pelicun/tests/basic/data/model/' 
'test_DemandModel/load_sample/demand_sample_D.csv' ) - return deepcopy(mdl) + model_copy = deepcopy(mdl) + assert isinstance(model_copy, DemandModel) + return model_copy - def test_init(self, demand_model): + def test_init(self, demand_model: DemandModel) -> None: assert demand_model.log assert demand_model.marginal_params is None @@ -130,19 +153,20 @@ def test_init(self, demand_model): assert demand_model._RVs is None assert demand_model.sample is None - def test_save_sample(self, demand_model_with_sample): + def test_save_sample(self, demand_model_with_sample: DemandModel) -> None: # instantiate a temporary directory in memory temp_dir = tempfile.mkdtemp() # save the sample there demand_model_with_sample.save_sample(f'{temp_dir}/temp.csv') - with open(f'{temp_dir}/temp.csv', 'r', encoding='utf-8') as f: + with Path(f'{temp_dir}/temp.csv').open(encoding='utf-8') as f: contents = f.read() assert contents == ( ',PFA-0-1,PFA-1-1,PID-1-1,SA_0.23-0-1\n' 'Units,inps2,inps2,rad,inps2\n' '0,158.62478,397.04389,0.02672,342.149\n' ) - res = demand_model_with_sample.save_sample(save_units=False) + res = demand_model_with_sample.save_sample() + assert isinstance(res, pd.DataFrame) assert res.to_dict() == { ('PFA', '0', '1'): {0: 158.62478}, ('PFA', '1', '1'): {0: 397.04389}, @@ -150,15 +174,19 @@ def test_save_sample(self, demand_model_with_sample): ('SA_0.23', '0', '1'): {0: 342.149}, } - def test_load_sample(self, demand_model_with_sample, demand_model_with_sample_B): + def test_load_sample( + self, + demand_model_with_sample: DemandModel, + demand_model_with_sample_b: DemandModel, + ) -> None: # retrieve the loaded sample and units - obtained_sample = demand_model_with_sample.sample - obtained_units = demand_model_with_sample.user_units + obtained_sample = ensure_value(demand_model_with_sample.sample) + obtained_units = ensure_value(demand_model_with_sample.user_units) - obtained_sample_2 = demand_model_with_sample_B.sample - obtained_units_2 = 
demand_model_with_sample_B.user_units + obtained_sample_2 = ensure_value(demand_model_with_sample_b.sample) + obtained_units_2 = ensure_value(demand_model_with_sample_b.user_units) - # demand_sample_A.csv and demand_sample_B.csv only differ in the + # demand_sample_A.csv and demand_sample_B.csv only differ in the # headers, where the first includes a tag for the hazard # level. Therefore, the two files are expected to result to the # same `obtained_sample` @@ -212,174 +240,187 @@ def test_load_sample(self, demand_model_with_sample, demand_model_with_sample_B) check_index_type=False, ) - def test_estimate_RID(self, demand_model_with_sample): - demands = demand_model_with_sample.sample['PID'] + def test_estimate_RID(self, demand_model_with_sample: DemandModel) -> None: + demands = ensure_value(demand_model_with_sample.sample)['PID'] params = {'yield_drift': 0.01} res = demand_model_with_sample.estimate_RID(demands, params) assert list(res.columns) == [('RID', '1', '1')] - with pytest.raises(ValueError): + with pytest.raises(ValueError, match='Invalid method: `xyz`'): demand_model_with_sample.estimate_RID(demands, params, method='xyz') - def test_expand_sample_float(self, demand_model_with_sample): - sample_before = demand_model_with_sample.sample.copy() - demand_model_with_sample.expand_sample("test_lab", 1.00, "unitless") - sample_after = demand_model_with_sample.sample.copy() + def test_expand_sample_float( + self, demand_model_with_sample: DemandModel + ) -> None: + sample_before = ensure_value(demand_model_with_sample.sample).copy() + demand_model_with_sample.expand_sample('test_lab', 1.00, 'unitless') + sample_after = ensure_value(demand_model_with_sample.sample).copy() pd.testing.assert_frame_equal( sample_before, sample_after.drop('test_lab', axis=1) ) assert sample_after.loc[0, ('test_lab', '0', '1')] == 1.0 - def test_expand_sample_numpy(self, demand_model_with_sample): - sample_before = demand_model_with_sample.sample.copy() - 
demand_model_with_sample.expand_sample( - "test_lab", np.array(1.00), "unitless" - ) - sample_after = demand_model_with_sample.sample.copy() + def test_expand_sample_numpy( + self, demand_model_with_sample: DemandModel + ) -> None: + sample_before = ensure_value(demand_model_with_sample.sample).copy() + demand_model_with_sample.expand_sample('test_lab', 1.00, 'unitless') + sample_after = ensure_value(demand_model_with_sample.sample).copy() pd.testing.assert_frame_equal( sample_before, sample_after.drop('test_lab', axis=1) ) assert sample_after.loc[0, ('test_lab', '0', '1')] == 1.0 - def test_expand_sample_error_no_sample(self, demand_model): + def test_expand_sample_error_no_sample(self, demand_model: DemandModel) -> None: with pytest.raises( ValueError, match='Demand model does not have a sample yet.' ): - demand_model.expand_sample("test_lab", 1.00, "unitless") + demand_model.expand_sample('test_lab', np.array((1.00,)), 'unitless') - def test_expand_sample_error_wrong_shape(self, demand_model_with_sample): + def test_expand_sample_error_wrong_shape( + self, demand_model_with_sample: DemandModel + ) -> None: with pytest.raises(ValueError, match='Incompatible array length.'): demand_model_with_sample.expand_sample( - "test_lab", np.array((1.00, 1.00)), "unitless" + 'test_lab', np.array((1.00, 1.00)), 'unitless' ) def test_calibrate_model( - self, calibrated_demand_model, demand_model_with_sample_C - ): - assert calibrated_demand_model.marginal_params['Family'].to_list() == [ + self, + calibrated_demand_model: DemandModel, + ) -> None: + assert ensure_value(calibrated_demand_model.marginal_params)[ + 'Family' + ].to_list() == [ 'normal', 'normal', 'lognormal', 'empirical', ] assert ( - calibrated_demand_model.marginal_params.at[ + ensure_value(calibrated_demand_model.marginal_params).loc[ ('PID', '1', '1'), 'TruncateUpper' ] == 0.06 ) def test_calibrate_model_censoring( - self, calibrated_demand_model, demand_model_with_sample_C - ): + self, + 
demand_model_with_sample_c: DemandModel, + ) -> None: # with a config featuring censoring the RIDs config = { - "ALL": { - "DistributionFamily": "normal", - "AddUncertainty": 0.00, + 'ALL': { + 'DistributionFamily': 'normal', + 'AddUncertainty': 0.00, }, - "PID": { - "DistributionFamily": "lognormal", - "CensorUpper": "0.05", + 'PID': { + 'DistributionFamily': 'lognormal', + 'CensorUpper': '0.05', }, } - demand_model_with_sample_C.calibrate_model(config) + demand_model_with_sample_c.calibrate_model(config) def test_calibrate_model_truncation( - self, calibrated_demand_model, demand_model_with_sample_C - ): + self, + demand_model_with_sample_c: DemandModel, + ) -> None: # with a config that specifies a truncation limit smaller than # the samples config = { - "ALL": { - "DistributionFamily": "normal", - "AddUncertainty": 0.00, + 'ALL': { + 'DistributionFamily': 'normal', + 'AddUncertainty': 0.00, }, - "PID": { - "DistributionFamily": "lognormal", - "TruncateUpper": "0.04", + 'PID': { + 'DistributionFamily': 'lognormal', + 'TruncateUpper': '0.04', }, } - demand_model_with_sample_C.calibrate_model(config) + demand_model_with_sample_c.calibrate_model(config) def test_save_load_model_with_empirical( - self, calibrated_demand_model, assessment_instance - ): - + self, calibrated_demand_model: DemandModel, assessment_instance: Assessment + ) -> None: # a model that has empirical marginal parameters temp_dir = tempfile.mkdtemp() calibrated_demand_model.save_model(f'{temp_dir}/temp') - assert os.path.exists(f'{temp_dir}/temp_marginals.csv') - assert os.path.exists(f'{temp_dir}/temp_empirical.csv') - assert os.path.exists(f'{temp_dir}/temp_correlation.csv') + assert Path(f'{temp_dir}/temp_marginals.csv').exists() + assert Path(f'{temp_dir}/temp_empirical.csv').exists() + assert Path(f'{temp_dir}/temp_correlation.csv').exists() # Load model to a different DemandModel instance to verify new_demand_model = assessment_instance.demand new_demand_model.load_model(f'{temp_dir}/temp') 
pd.testing.assert_frame_equal( - calibrated_demand_model.marginal_params, - new_demand_model.marginal_params, + ensure_value(calibrated_demand_model.marginal_params), + ensure_value(new_demand_model.marginal_params), atol=1e-4, check_index_type=False, check_column_type=False, ) pd.testing.assert_frame_equal( - calibrated_demand_model.correlation, - new_demand_model.correlation, + ensure_value(calibrated_demand_model.correlation), + ensure_value(new_demand_model.correlation), atol=1e-4, check_index_type=False, check_column_type=False, ) pd.testing.assert_frame_equal( - calibrated_demand_model.empirical_data, - new_demand_model.empirical_data, + ensure_value(calibrated_demand_model.empirical_data), + ensure_value(new_demand_model.empirical_data), atol=1e-4, check_index_type=False, check_column_type=False, ) def test_save_load_model_without_empirical( - self, demand_model_with_sample_C, assessment_instance - ): + self, + demand_model_with_sample_c: DemandModel, + assessment_instance: Assessment, + ) -> None: # a model that does not have empirical marginal parameters temp_dir = tempfile.mkdtemp() config = { - "ALL": { - "DistributionFamily": "normal", - "AddUncertainty": 0.00, + 'ALL': { + 'DistributionFamily': 'normal', + 'AddUncertainty': 0.00, }, - "PID": { - "DistributionFamily": "lognormal", - "TruncateUpper": "0.04", + 'PID': { + 'DistributionFamily': 'lognormal', + 'TruncateUpper': '0.04', }, } - demand_model_with_sample_C.calibrate_model(config) - demand_model_with_sample_C.save_model(f'{temp_dir}/temp') - assert os.path.exists(f'{temp_dir}/temp_marginals.csv') - assert os.path.exists(f'{temp_dir}/temp_correlation.csv') + demand_model_with_sample_c.calibrate_model(config) + demand_model_with_sample_c.save_model(f'{temp_dir}/temp') + assert Path(f'{temp_dir}/temp_marginals.csv').exists() + assert Path(f'{temp_dir}/temp_correlation.csv').exists() # Load model to a different DemandModel instance to verify new_demand_model = assessment_instance.demand 
new_demand_model.load_model(f'{temp_dir}/temp') pd.testing.assert_frame_equal( - demand_model_with_sample_C.marginal_params, - new_demand_model.marginal_params, + ensure_value(demand_model_with_sample_c.marginal_params), + ensure_value(new_demand_model.marginal_params), ) pd.testing.assert_frame_equal( - demand_model_with_sample_C.correlation, new_demand_model.correlation + ensure_value(demand_model_with_sample_c.correlation), + ensure_value(new_demand_model.correlation), ) - assert demand_model_with_sample_C.empirical_data is None + assert demand_model_with_sample_c.empirical_data is None assert new_demand_model.empirical_data is None - def test_generate_sample_exceptions(self, demand_model): + def test_generate_sample_exceptions(self, demand_model: DemandModel) -> None: # generating a sample from a non calibrated model should fail - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Model parameters have not been specified' + ): demand_model.generate_sample( - {"SampleSize": 3, 'PreserveRawOrder': False} + {'SampleSize': 3, 'PreserveRawOrder': False} ) - def test_generate_sample(self, calibrated_demand_model): + def test_generate_sample(self, calibrated_demand_model: DemandModel) -> None: calibrated_demand_model.generate_sample( - {"SampleSize": 3, 'PreserveRawOrder': False} + {'SampleSize': 3, 'PreserveRawOrder': False} ) # get the generated demand sample @@ -427,7 +468,9 @@ def test_generate_sample(self, calibrated_demand_model): check_index_type=False, ) - def test_generate_sample_with_demand_cloning(self, assessment_instance): + def test_generate_sample_with_demand_cloning( + self, assessment_instance: Assessment + ) -> None: # # used for debugging # assessment_instance = assessment.Assessment() @@ -442,8 +485,8 @@ def test_generate_sample_with_demand_cloning(self, assessment_instance): ) demand_model.calibrate_model( { - "ALL": { - "DistributionFamily": "lognormal", + 'ALL': { + 'DistributionFamily': 'lognormal', }, } ) @@ -461,12 
+504,12 @@ def test_generate_sample_with_demand_cloning(self, assessment_instance): ) assert len(w) == 1 assert ( - "The demand cloning configuration lists columns " + 'The demand cloning configuration lists columns ' "that are not present in the original demand sample's " "columns: ['not_present']." ) in str(w[0].message) # we'll just get a warning for the `not_present` entry - assert demand_model.sample.columns.to_list() == [ + assert ensure_value(demand_model.sample).columns.to_list() == [ ('PGA', '0', '1'), ('PGV', '0', '1'), ('PGV', '0', '2'), @@ -479,12 +522,14 @@ def test_generate_sample_with_demand_cloning(self, assessment_instance): ('PGV', '2', '3'), ] assert np.array_equal( - demand_model.sample[('PGV', '0', '1')].values, - demand_model.sample[('PGV', '0', '3')].values, + demand_model.sample['PGV', '0', '1'].values, # type: ignore + demand_model.sample['PGV', '0', '3'].values, # type: ignore ) # exceptions # Duplicate entries in demand cloning configuration - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Duplicate entries in demand cloning configuration.' 
+ ): demand_model.generate_sample( { 'SampleSize': 1000, @@ -496,8 +541,9 @@ def test_generate_sample_with_demand_cloning(self, assessment_instance): } ) - def test__get_required_demand_type(self, assessment_instance): - + def test__get_required_demand_type( + self, assessment_instance: Assessment + ) -> None: # Simple case: single demand damage_model = assessment_instance.damage cmp_set = {'testing.component'} @@ -513,7 +559,7 @@ def test__get_required_demand_type(self, assessment_instance): ).T.rename_axis(index=['cmp', 'loc', 'dir', 'uid']) demand_offset = {'PFA': 0} required = _get_required_demand_type( - damage_model.ds_model.damage_params, pgb, demand_offset + ensure_value(damage_model.ds_model.damage_params), pgb, demand_offset ) expected = defaultdict( list, @@ -536,21 +582,21 @@ def test__get_required_demand_type(self, assessment_instance): ).T.rename_axis(index=['cmp', 'loc', 'dir', 'uid']) demand_offset = {'PFA': 0} required = _get_required_demand_type( - damage_model.ds_model.damage_params, pgb, demand_offset + ensure_value(damage_model.ds_model.damage_params), pgb, demand_offset ) expected = defaultdict( list, { - (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): [ + (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): [ # type: ignore ('testing.component', '1', '1', '1') ] }, ) assert required == expected - def test__assemble_required_demand_data(self, assessment_instance): - - # Utility demand case: two demands are required + def test__assemble_required_demand_data( + self, assessment_instance: Assessment + ) -> None: damage_model = assessment_instance.damage cmp_set = {'testing.component'} damage_model.load_model_parameters( @@ -576,7 +622,9 @@ def test__assemble_required_demand_data(self, assessment_instance): } ) demand_data = _assemble_required_demand_data( - required_edps, nondirectional_multipliers, demand_sample + required_edps, # type: ignore + nondirectional_multipliers, + demand_sample, ) expected = { (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): 
np.array( diff --git a/pelicun/tests/basic/test_file_io.py b/pelicun/tests/basic/test_file_io.py index 940e3d564..06444a707 100644 --- a/pelicun/tests/basic/test_file_io.py +++ b/pelicun/tests/basic/test_file_io.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,58 +37,57 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the file_io module of pelicun. -""" +"""These are unit and integration tests on the file_io module of pelicun.""" from __future__ import annotations + import tempfile -import os -import pytest +from pathlib import Path + import numpy as np import pandas as pd -from pelicun import file_io -from pelicun import base -from pelicun.warnings import PelicunWarning +import pytest +from pelicun import base, file_io +from pelicun.pelicun_warnings import PelicunWarning # The tests maintain the order of definitions of the `file_io.py` file. 
-def test_save_to_csv(): +def test_save_to_csv() -> None: # Test saving with orientation 0 - data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]}) - units = pd.Series(["meters", "meters"], index=["A", "B"]) - unit_conversion_factors = {"meters": 0.001} + data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]}) + units = pd.Series(['meters', 'meters'], index=['A', 'B']) + unit_conversion_factors = {'meters': 0.001} # Save to a temporary file with tempfile.TemporaryDirectory() as tmpdir: - filepath = os.path.join(tmpdir, 'foo.csv') + filepath = Path(tmpdir) / 'foo.csv' file_io.save_to_csv( data, filepath, units, unit_conversion_factors, orientation=0 ) - assert os.path.isfile(filepath) + assert Path(filepath).is_file() # Check that the file contains the expected data - with open(filepath, 'r', encoding='utf-8') as f: + with Path(filepath).open(encoding='utf-8') as f: contents = f.read() assert contents == ( ',A,B\n0,meters,meters\n0,1.0,4.0' '\n1,2.0,5.0\n2,3.0,6.0\n' ) # Test saving with orientation 1 - data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]}) - units = pd.Series(["meters", "meters"], index=["A", "B"]) - unit_conversion_factors = {"meters": 0.001} + data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]}) + units = pd.Series(['meters', 'meters'], index=['A', 'B']) + unit_conversion_factors = {'meters': 0.001} # Save to a temporary file with tempfile.TemporaryDirectory() as tmpdir: - filepath = os.path.join(tmpdir, 'bar.csv') + filepath = Path(tmpdir) / 'bar.csv' file_io.save_to_csv( data, filepath, units, unit_conversion_factors, orientation=1 ) - assert os.path.isfile(filepath) + assert Path(filepath).is_file() # Check that the file contains the expected data - with open(filepath, 'r', encoding='utf-8') as f: + with Path(filepath).open(encoding='utf-8') as f: contents = f.read() assert contents == ( ',0,A,B\n0,,0.001,0.004\n1,,0.002,' '0.005\n2,,0.003,0.006\n' @@ -99,38 +97,40 @@ def 
test_save_to_csv(): # edge cases # - data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]}) - units = pd.Series(["meters", "meters"], index=["A", "B"]) + data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]}) + units = pd.Series(['meters', 'meters'], index=['A', 'B']) # units given, without unit conversion factors - unit_conversion_factors = None - with pytest.raises(ValueError): - with tempfile.TemporaryDirectory() as tmpdir: - filepath = os.path.join(tmpdir, 'foo.csv') - file_io.save_to_csv( - data, filepath, units, unit_conversion_factors, orientation=0 - ) + filepath = Path(tmpdir) / 'foo.csv' + with pytest.raises( + ValueError, + match='When `units` is not None, `unit_conversion_factors` must be provided.', + ), tempfile.TemporaryDirectory() as tmpdir: + file_io.save_to_csv( + data, filepath, units, unit_conversion_factors=None, orientation=0 + ) - unit_conversion_factors = {"meters": 0.001} + unit_conversion_factors = {'meters': 0.001} # not csv extension - with pytest.raises(ValueError): - with tempfile.TemporaryDirectory() as tmpdir: - filepath = os.path.join(tmpdir, 'foo.xyz') - file_io.save_to_csv( - data, filepath, units, unit_conversion_factors, orientation=0 - ) + filepath = Path(tmpdir) / 'foo.xyz' + with pytest.raises( + ValueError, + match=('Please use the `.csv` file extension. 
Received file name is '), + ), tempfile.TemporaryDirectory() as tmpdir: + file_io.save_to_csv( + data, filepath, units, unit_conversion_factors, orientation=0 + ) # no data, log a complaint mylogger = base.Logger( - verbose=True, log_show_ms=False, log_file=None, print_log=True + log_file=None, verbose=True, log_show_ms=False, print_log=True ) - data = None with tempfile.TemporaryDirectory() as tmpdir: - filepath = os.path.join(tmpdir, 'foo.csv') + filepath = Path(tmpdir) / 'foo.csv' with pytest.warns(PelicunWarning) as record: file_io.save_to_csv( - data, + None, filepath, units, unit_conversion_factors, @@ -140,10 +140,13 @@ def test_save_to_csv(): assert 'Data was empty, no file saved.' in str(record.list[0].message) -def test_substitute_default_path(): +def test_substitute_default_path() -> None: prior_path = file_io.base.pelicun_path - file_io.base.pelicun_path = 'some_path' - input_paths = ['PelicunDefault/data/file1.txt', '/data/file2.txt'] + file_io.base.pelicun_path = Path('some_path') + input_paths: list[str | pd.DataFrame] = [ + 'PelicunDefault/data/file1.txt', + '/data/file2.txt', + ] expected_paths = [ 'some_path/resources/SimCenterDBDL/data/file1.txt', '/data/file2.txt', @@ -153,24 +156,24 @@ def test_substitute_default_path(): file_io.base.pelicun_path = prior_path -def test_load_data(): +def test_load_data() -> None: # test loading data with orientation 0 filepath = 'pelicun/tests/basic/data/file_io/test_load_data/units.csv' - unit_conversion_factors = {"inps2": 0.0254, "rad": 1.00} + unit_conversion_factors = {'inps2': 0.0254, 'rad': 1.00} data = file_io.load_data(filepath, unit_conversion_factors) - assert np.array_equal(data.index.values, np.array(range(6))) - assert data.shape == (6, 19) - assert isinstance(data.columns, pd.core.indexes.multi.MultiIndex) - assert data.columns.nlevels == 4 + assert np.array_equal(data.index.values, np.array(range(6))) # type: ignore + assert data.shape == (6, 19) # type: ignore + assert isinstance(data.columns, 
pd.core.indexes.multi.MultiIndex) # type: ignore + assert data.columns.nlevels == 4 # type: ignore _, units = file_io.load_data( filepath, unit_conversion_factors, return_units=True ) for item in unit_conversion_factors: - assert item in units.unique() + assert item in units.unique() # type: ignore filepath = 'pelicun/tests/basic/data/file_io/test_load_data/no_units.csv' data_nounits = file_io.load_data(filepath, {}) @@ -187,7 +190,7 @@ def test_load_data(): # with convert=None filepath = 'pelicun/tests/basic/data/file_io/test_load_data/orient_1_units.csv' - unit_conversion_factors = {"g": 1.00, "rad": 1.00} + unit_conversion_factors = {'g': 1.00, 'rad': 1.00} data = file_io.load_data( filepath, unit_conversion_factors, orientation=1, reindex=False ) @@ -199,7 +202,7 @@ def test_load_data(): data = file_io.load_data( filepath, unit_conversion_factors, orientation=1, reindex=True ) - assert np.array_equal(data.index.values, np.array(range(10))) + assert np.array_equal(data.index.values, np.array(range(10))) # type: ignore # # edge cases @@ -209,7 +212,10 @@ def test_load_data(): with pytest.raises(FileNotFoundError): file_io.load_from_file('/') # exception: not a .csv file - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='Unexpected file type received when trying to load from csv', + ): file_io.load_from_file('pelicun/base.py') diff --git a/pelicun/tests/basic/test_loss_model.py b/pelicun/tests/basic/test_loss_model.py index cca2eb2d8..a698da019 100644 --- a/pelicun/tests/basic/test_loss_model.py +++ b/pelicun/tests/basic/test_loss_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,44 +37,47 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the loss model of pelicun. 
-""" +"""These are unit and integration tests on the loss model of pelicun.""" from __future__ import annotations -from itertools import product + +import re from copy import deepcopy -import pytest +from itertools import product +from typing import TYPE_CHECKING + import numpy as np import pandas as pd -from pelicun import model -from pelicun import uq +import pytest + +from pelicun import model, uq +from pelicun.base import ensure_value +from pelicun.model.loss_model import ( + LossModel, + RepairModel_DS, + RepairModel_LF, + _is_for_ds_model, + _is_for_lf_model, +) +from pelicun.pelicun_warnings import PelicunWarning from pelicun.tests.basic.test_pelicun_model import TestPelicunModel -from pelicun.model.loss_model import LossModel -from pelicun.model.loss_model import RepairModel_Base -from pelicun.model.loss_model import RepairModel_DS -from pelicun.model.loss_model import RepairModel_LF -from pelicun.model.loss_model import _is_for_ds_model -from pelicun.model.loss_model import _is_for_lf_model -from pelicun.warnings import PelicunWarning -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc +if TYPE_CHECKING: + from pelicun.assessment import Assessment + from pelicun.model.asset_model import AssetModel class TestLossModel(TestPelicunModel): - @pytest.fixture - def loss_model(self, assessment_instance): + def loss_model(self, assessment_instance: Assessment) -> LossModel: return deepcopy(assessment_instance.loss) @pytest.fixture - def asset_model_empty(self, assessment_instance): + def asset_model_empty(self, assessment_instance: Assessment) -> AssetModel: return deepcopy(assessment_instance.asset) @pytest.fixture - def asset_model_A(self, asset_model_empty): + def asset_model_a(self, asset_model_empty: AssetModel) -> AssetModel: asset = deepcopy(asset_model_empty) asset.cmp_marginal_params = pd.DataFrame( { @@ -94,7 +96,7 @@ def asset_model_A(self, 
asset_model_empty): return asset @pytest.fixture - def loss_model_with_ones(self, assessment_instance): + def loss_model_with_ones(self, assessment_instance: Assessment) -> LossModel: loss_model = assessment_instance.loss # add artificial values to the samples @@ -117,15 +119,13 @@ def loss_model_with_ones(self, assessment_instance): ('uid1', 'uid2'), ): data_ds[ - ( - decision_variable, - consequence, - component, - damage_state, - location, - direction, - uid, - ) + decision_variable, + consequence, + component, + damage_state, + location, + direction, + uid, ] = [1.00, 1.00, 1.00] loss_model.ds_model.sample = pd.DataFrame(data_ds).rename_axis( columns=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid'] @@ -147,14 +147,12 @@ def loss_model_with_ones(self, assessment_instance): ('uid1', 'uid2'), ): data_lf[ - ( - decision_variable, - consequence, - component, - location, - direction, - uid, - ) + decision_variable, + consequence, + component, + location, + direction, + uid, ] = [1.00, 1.00, 1.00] loss_model.lf_model.sample = pd.DataFrame(data_lf).rename_axis( columns=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'] @@ -162,43 +160,47 @@ def loss_model_with_ones(self, assessment_instance): return loss_model - def test___init__(self, loss_model): + def test___init__(self, loss_model: LossModel) -> None: assert loss_model.log assert loss_model.ds_model with pytest.raises(AttributeError): - loss_model.xyz = 123 + loss_model.xyz = 123 # type: ignore assert loss_model.ds_model.loss_params is None assert loss_model.ds_model.sample is None assert len(loss_model._loss_models) == 2 - def test_decision_variables(self, loss_model): + def test_decision_variables(self, loss_model: LossModel) -> None: dvs = ('Carbon', 'Cost', 'Energy', 'Time') assert loss_model.decision_variables == dvs assert loss_model.ds_model.decision_variables == dvs assert loss_model.lf_model.decision_variables == dvs - def test_add_loss_map(self, loss_model, asset_model_A): - - loss_model._asmnt.asset = 
asset_model_A + def test_add_loss_map( + self, loss_model: LossModel, asset_model_a: AssetModel + ) -> None: + loss_model._asmnt.asset = asset_model_a - loss_map = loss_map = pd.DataFrame( + loss_map = pd.DataFrame( { 'Repair': ['consequence.A', 'consequence.B'], }, index=['cmp.A', 'cmp.B'], ) loss_model.add_loss_map(loss_map) - pd.testing.assert_frame_equal(loss_model._loss_map, loss_map) + pd.testing.assert_frame_equal(ensure_value(loss_model._loss_map), loss_map) for contained_model in loss_model._loss_models: - pd.testing.assert_frame_equal(contained_model._loss_map, loss_map) - - def test_load_model_parameters(self, loss_model, asset_model_A): + pd.testing.assert_frame_equal( + ensure_value(contained_model.loss_map), loss_map + ) - loss_model._asmnt.asset = asset_model_A + def test_load_model_parameters( + self, loss_model: LossModel, asset_model_a: AssetModel + ) -> None: + loss_model._asmnt.asset = asset_model_a loss_model.decision_variables = ('my_RV',) - loss_map = loss_map = pd.DataFrame( + loss_map = pd.DataFrame( { 'Repair': ['consequence.A', 'consequence.B', 'consequence.F'], }, @@ -238,23 +240,23 @@ def test_load_model_parameters(self, loss_model, asset_model_A): ) # assert len(record) == 1 - # TODO: re-enable the line above once we address other + # TODO(JVM): re-enable the line above once we address other # warnings, and change indexing to [0] below. assert ( - "The loss model does not provide loss information " - "for the following component(s) in the asset " + 'The loss model does not provide loss information ' + 'for the following component(s) in the asset ' "model: [('consequence.F', 'my_RV')]." 
) in str(record[-1].message) - def test__loss_models(self, loss_model): + def test__loss_models(self, loss_model: LossModel) -> None: models = loss_model._loss_models assert len(models) == 2 assert isinstance(models[0], RepairModel_DS) assert isinstance(models[1], RepairModel_LF) - def test__loss_map(self, loss_model): - loss_map = loss_map = pd.DataFrame( + def test__loss_map(self, loss_model: LossModel) -> None: + loss_map = pd.DataFrame( { 'Repair': ['consequence_A', 'consequence_B'], }, @@ -263,11 +265,13 @@ def test__loss_map(self, loss_model): # test setter loss_model._loss_map = loss_map # test getter - pd.testing.assert_frame_equal(loss_model._loss_map, loss_map) + pd.testing.assert_frame_equal(ensure_value(loss_model._loss_map), loss_map) for contained_model in loss_model._loss_models: - pd.testing.assert_frame_equal(contained_model._loss_map, loss_map) + pd.testing.assert_frame_equal( + ensure_value(contained_model.loss_map), loss_map + ) - def test__missing(self, loss_model): + def test__missing(self, loss_model: LossModel) -> None: missing = { ('missing.component', 'Time'), ('missing.component', 'Energy'), @@ -277,9 +281,11 @@ def test__missing(self, loss_model): # test getter assert loss_model._missing == missing for contained_model in loss_model._loss_models: - assert contained_model._missing == missing + assert contained_model.missing == missing - def test__ensure_loss_parameter_availability(self, assessment_instance): + def test__ensure_loss_parameter_availability( + self, assessment_instance: Assessment + ) -> None: loss_model = LossModel(assessment_instance) # Only consider `DecisionVariableXYZ` @@ -289,7 +295,7 @@ def test__ensure_loss_parameter_availability(self, assessment_instance): # C, D should be in the lf model # E should be missing - loss_map = loss_map = pd.DataFrame( + loss_map = pd.DataFrame( { 'Repair': [f'consequence_{x}' for x in ('A', 'B', 'C', 'D', 'E')], }, @@ -321,16 +327,18 @@ def test__ensure_loss_parameter_availability(self, 
assessment_instance): assert missing == {('consequence_E', 'DecisionVariableXYZ')} assert len(record) == 1 assert ( - "The loss model does not provide loss information " - "for the following component(s) in the asset model: " + 'The loss model does not provide loss information ' + 'for the following component(s) in the asset model: ' "[('consequence_E', 'DecisionVariableXYZ')]" ) in str(record[0].message) - def test_aggregate_losses_when_no_loss(self, assessment_instance): - + def test_aggregate_losses_when_no_loss( + self, assessment_instance: Assessment + ) -> None: # tests that aggregate losses works when there is no loss. loss_model = LossModel(assessment_instance) df_agg = loss_model.aggregate_losses() + assert isinstance(df_agg, pd.DataFrame) pd.testing.assert_frame_equal( df_agg, pd.DataFrame( @@ -345,8 +353,9 @@ def test_aggregate_losses_when_no_loss(self, assessment_instance): ), ) - def test__apply_consequence_scaling(self, loss_model_with_ones): - + def test__apply_consequence_scaling( + self, loss_model_with_ones: LossModel + ) -> None: # When only `dv` is provided scaling_conditions = {'dv': 'Cost'} scaling_factor = 2.00 @@ -356,6 +365,7 @@ def test__apply_consequence_scaling(self, loss_model_with_ones): ) for loss_model in loss_model_with_ones._loss_models: + assert loss_model.sample is not None mask = loss_model.sample.columns.get_level_values('dv') == 'Cost' assert np.all(loss_model.sample.iloc[:, mask] == 2.00) assert np.all(loss_model.sample.iloc[:, ~mask] == 1.00) @@ -368,15 +378,17 @@ def test__apply_consequence_scaling(self, loss_model_with_ones): ) for loss_model in loss_model_with_ones._loss_models: - mask = np.full(len(loss_model.sample.columns), True) + assert loss_model.sample is not None + mask = np.full(len(loss_model.sample.columns), fill_value=True) mask &= loss_model.sample.columns.get_level_values('dv') == 'Carbon' mask &= loss_model.sample.columns.get_level_values('loc') == '1' mask &= 
loss_model.sample.columns.get_level_values('uid') == 'uid2' assert np.all(loss_model.sample.iloc[:, mask] == 2.00) assert np.all(loss_model.sample.iloc[:, ~mask] == 1.00) - def test_aggregate_losses_combination(self, assessment_instance): - + def test_aggregate_losses_combination( + self, assessment_instance: Assessment + ) -> None: # The test sets up a very simple loss calculation from # scratch, only defining essential parameters. @@ -389,15 +401,15 @@ def test_aggregate_losses_combination(self, assessment_instance): }, index=['Units', 'Theta_0'], ).T - perfect_CORR = pd.DataFrame( + perfect_corr = pd.DataFrame( np.ones((2, 2)), columns=demand_marginal_parameters.index, index=demand_marginal_parameters.index, ) assessment_instance.demand.load_model( - {'marginals': demand_marginal_parameters, 'correlation': perfect_CORR} + {'marginals': demand_marginal_parameters, 'correlation': perfect_corr} ) - assessment_instance.demand.generate_sample({"SampleSize": sample_size}) + assessment_instance.demand.generate_sample({'SampleSize': sample_size}) # asset assessment_instance.asset.cmp_marginal_params = pd.DataFrame( @@ -433,7 +445,7 @@ def test_aggregate_losses_combination(self, assessment_instance): assessment_instance.loss.calculate() # individual losses - l1, l2 = assessment_instance.loss.lf_model.sample.iloc[0, :] + l1, l2 = ensure_value(assessment_instance.loss.lf_model.sample).iloc[0, :] # combined loss, result of interpolation l_comb = 0.904 @@ -444,7 +456,7 @@ def test_aggregate_losses_combination(self, assessment_instance): ), index_col=None, header=None, - ).values + ).to_numpy() loss_combination = { 'Cost': { ('wind.comp', 'flood.comp'): combination_array, @@ -454,6 +466,7 @@ def test_aggregate_losses_combination(self, assessment_instance): agg_df, _ = assessment_instance.loss.aggregate_losses( loss_combination=loss_combination, future=True ) + assert isinstance(agg_df, pd.DataFrame) pd.testing.assert_frame_equal( agg_df, pd.DataFrame([l_comb] * 5, 
columns=['repair_cost']) ) @@ -464,11 +477,14 @@ def test_aggregate_losses_combination(self, assessment_instance): assert l2 == combination_array[0, 4] assert combination_array[8, 0] <= l1 <= combination_array[9, 0] - def test_aggregate_losses_thresholds(self, loss_model_with_ones): - + def test_aggregate_losses_thresholds( + self, loss_model_with_ones: LossModel + ) -> None: # Row 0 has the value of 1.0 in all columns. # Adjust rows 1 and 2 to have the values 2.0 and 3.0, for # testing. + assert loss_model_with_ones.ds_model.sample is not None + assert loss_model_with_ones.lf_model.sample is not None loss_model_with_ones.decision_variables = ('Cost', 'Carbon') loss_model_with_ones.dv_units = {'Cost': 'USD_2011', 'Carbon': 'kg'} loss_model_with_ones.ds_model.sample.iloc[1, :] = 2.00 @@ -477,21 +493,24 @@ def test_aggregate_losses_thresholds(self, loss_model_with_ones): loss_model_with_ones.lf_model.sample.iloc[2, :] = 3.00 # Instantiate a RandomVariableRegistry to pass as an argument # to the method. 
- RV_reg = uq.RandomVariableRegistry(loss_model_with_ones._asmnt.options.rng) + rv_reg = uq.RandomVariableRegistry(loss_model_with_ones._asmnt.options.rng) # Add a threshold for `Cost` - RV_reg.add_RV( - uq.rv_class_map('deterministic')(name='Cost', theta=np.array((400.00,))) + rv_reg.add_RV( + uq.rv_class_map('deterministic')(name='Cost', theta=np.array((400.00,))) # type: ignore ) # Add a threshold for `Carbon` - RV_reg.add_RV( + rv_reg.add_RV( uq.rv_class_map('deterministic')( - name='Carbon', theta=np.array((100.00,)) + name='Carbon', + theta=np.array((100.00,)), # type: ignore ) ) df_agg, exceedance_bool_df = loss_model_with_ones.aggregate_losses( - replacement_configuration=(RV_reg, {'Cost': 0.50, 'Carbon': 1.00}), + replacement_configuration=(rv_reg, {'Cost': 0.50, 'Carbon': 1.00}), future=True, ) + assert isinstance(df_agg, pd.DataFrame) + assert isinstance(exceedance_bool_df, pd.DataFrame) df_agg_expected = pd.DataFrame( { 'repair_carbon': [96.00, 100.00, 100.00], @@ -506,8 +525,7 @@ def test_aggregate_losses_thresholds(self, loss_model_with_ones): exceedance_bool_df, exceedance_bool_df_expected ) - def test_consequence_scaling(self, loss_model_with_ones): - + def test_consequence_scaling(self, loss_model_with_ones: LossModel) -> None: loss_model_with_ones.consequence_scaling( 'pelicun/tests/basic/data/model/test_LossModel/scaling_specification.csv' ) @@ -528,9 +546,10 @@ def test_consequence_scaling(self, loss_model_with_ones): .set_index(['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid']) .T.astype(float) ) - expected_ds.index = pd.RangeIndex(range(len(expected_ds))) + expected_ds.index = pd.RangeIndex(range(len(expected_ds))) # type: ignore pd.testing.assert_frame_equal( - loss_model_with_ones.ds_model.sample, expected_ds + loss_model_with_ones.ds_model.sample, # type: ignore + expected_ds, ) expected_lf = ( @@ -548,48 +567,52 @@ def test_consequence_scaling(self, loss_model_with_ones): .set_index(['dv', 'loss', 'dmg', 'loc', 'dir', 'uid']) 
.T.astype(float) ) - expected_lf.index = pd.RangeIndex(range(len(expected_lf))) + expected_lf.index = pd.RangeIndex(range(len(expected_lf))) # type: ignore pd.testing.assert_frame_equal( - loss_model_with_ones.lf_model.sample, expected_lf + loss_model_with_ones.lf_model.sample, # type: ignore + expected_lf, ) class TestRepairModel_Base(TestPelicunModel): - def test___init__(self, assessment_instance): - repair_model = RepairModel_Base(assessment_instance) + def test___init__(self, assessment_instance: Assessment) -> None: + repair_model = RepairModel_DS(assessment_instance) with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - repair_model.xyz = 123 + repair_model.xyz = 123 # type: ignore - def test__drop_unused_loss_parameters(self, assessment_instance): - base_model = RepairModel_Base(assessment_instance) - loss_map = loss_map = pd.DataFrame( + def test_drop_unused_loss_parameters( + self, assessment_instance: Assessment + ) -> None: + base_model = RepairModel_DS(assessment_instance) + loss_map = pd.DataFrame( { 'Repair': ['consequence_A', 'consequence_B'], }, index=['cmp_A', 'cmp_B'], ) # without loss_params, it should do nothing - base_model._drop_unused_loss_parameters(loss_map) + base_model.drop_unused_loss_parameters(loss_map) base_model.loss_params = pd.DataFrame( index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')] ) - base_model._drop_unused_loss_parameters(loss_map) + base_model.drop_unused_loss_parameters(loss_map) pd.testing.assert_frame_equal( base_model.loss_params, pd.DataFrame(index=[f'consequence_{x}' for x in ('A', 'B')]), ) - def test__remove_incomplete_components(self, assessment_instance): - base_model = RepairModel_Base(assessment_instance) + def test__remove_incomplete_components( + self, assessment_instance: Assessment + ) -> None: + base_model = RepairModel_DS(assessment_instance) # without loss_params, it should do nothing - base_model._remove_incomplete_components() + base_model.remove_incomplete_components() # 
without incomplete, it should do nothing loss_params = pd.DataFrame( index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')] ) base_model.loss_params = loss_params - base_model._remove_incomplete_components() + base_model.remove_incomplete_components() pd.testing.assert_frame_equal( base_model.loss_params, loss_params, @@ -599,7 +622,7 @@ def test__remove_incomplete_components(self, assessment_instance): index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')], ) # Now entry D should be gone - base_model._remove_incomplete_components() + base_model.remove_incomplete_components() pd.testing.assert_frame_equal( base_model.loss_params, pd.DataFrame( @@ -608,14 +631,16 @@ def test__remove_incomplete_components(self, assessment_instance): ), ) - def test__get_available(self, assessment_instance): - base_model = RepairModel_Base(assessment_instance) + def test__get_available(self, assessment_instance: Assessment) -> None: + base_model = RepairModel_DS(assessment_instance) base_model.loss_params = pd.DataFrame(index=['cmp.A', 'cmp.B', 'cmp.C']) - assert base_model._get_available() == {'cmp.A', 'cmp.B', 'cmp.C'} + assert base_model.get_available() == {'cmp.A', 'cmp.B', 'cmp.C'} class TestRepairModel_DS(TestRepairModel_Base): - def test__convert_loss_parameter_units(self, assessment_instance): + def test_convert_loss_parameter_units( + self, assessment_instance: Assessment + ) -> None: ds_model = RepairModel_DS(assessment_instance) ds_model.loss_params = pd.DataFrame( { @@ -628,7 +653,7 @@ def test__convert_loss_parameter_units(self, assessment_instance): index=pd.MultiIndex.from_tuples([('cmpA', 'Cost'), ('cmpB', 'Cost')]), ) - ds_model._convert_loss_parameter_units() + ds_model.convert_loss_parameter_units() # DVs are scaled by 3/2, quantities by 2 pd.testing.assert_frame_equal( @@ -647,7 +672,9 @@ def test__convert_loss_parameter_units(self, assessment_instance): ), ) - def test__drop_unused_damage_states(self, assessment_instance): + def 
test__drop_unused_damage_states( + self, assessment_instance: Assessment + ) -> None: ds_model = RepairModel_DS(assessment_instance) loss_params = pd.DataFrame( { @@ -661,16 +688,15 @@ def test__drop_unused_damage_states(self, assessment_instance): } ) ds_model.loss_params = loss_params - ds_model._drop_unused_damage_states() - pd.testing.assert_frame_equal(ds_model.loss_params, loss_params.iloc[0:4, :]) - - def test__create_DV_RVs(self, assessment_instance): + ds_model.drop_unused_damage_states() + pd.testing.assert_frame_equal(ds_model.loss_params, loss_params.iloc[:, 0:4]) + def test__create_DV_RVs(self, assessment_instance: Assessment) -> None: assessment_instance.options.rho_cost_time = 0.30 ds_model = RepairModel_DS(assessment_instance) ds_model.decision_variables = ('Cost', 'Time') - ds_model._missing = {('cmp.B', 'Cost'), ('cmp.B', 'Time')} - ds_model._loss_map = pd.DataFrame( + ds_model.missing = {('cmp.B', 'Cost'), ('cmp.B', 'Time')} + ds_model.loss_map = pd.DataFrame( { 'Repair': ['cmp.A', 'cmp.B', 'cmp.C', 'cmp.D', 'cmp.E'], }, @@ -709,6 +735,7 @@ def test__create_DV_RVs(self, assessment_instance): names=['cmp', 'loc', 'dir', 'uid', 'ds'], ) rv_reg = ds_model._create_DV_RVs(cases) + assert rv_reg is not None for key in ( 'Cost-cmp.A-1-0-1-0', 'Time-cmp.A-1-0-1-0', @@ -735,12 +762,13 @@ def test__create_DV_RVs(self, assessment_instance): ) assert len(rv_reg.RV_set) == 1 - def test__create_DV_RVs_all_deterministic(self, assessment_instance): - + def test__create_DV_RVs_all_deterministic( + self, assessment_instance: Assessment + ) -> None: ds_model = RepairModel_DS(assessment_instance) ds_model.decision_variables = ('myRV',) - ds_model._missing = set() - ds_model._loss_map = pd.DataFrame( + ds_model.missing = set() + ds_model.loss_map = pd.DataFrame( {'Repair': ['cmp.A']}, index=['cmp.A'], ) @@ -762,8 +790,9 @@ def test__create_DV_RVs_all_deterministic(self, assessment_instance): assert rv_reg is None - def test__calc_median_consequence_no_locs(self, 
assessment_instance): - + def test__calc_median_consequence_no_locs( + self, assessment_instance: Assessment + ) -> None: # Test the method when the eco_qnt dataframe's columns do not # contain `loc` information. @@ -783,7 +812,7 @@ def test__calc_median_consequence_no_locs(self, assessment_instance): # is_for_LF_model represents a component->consequence pair # that is intended for processing by the loss function model # and should be ignored by the damage state model. - ds_model._loss_map = pd.DataFrame( + ds_model.loss_map = pd.DataFrame( { 'Repair': ['cmp.A', 'cmp.B', 'missing_cmp', 'is_for_LF_model'], }, @@ -809,9 +838,10 @@ def test__calc_median_consequence_no_locs(self, assessment_instance): [('cmp.A', 'my_DV'), ('cmp.B', 'my_DV')] ), ).rename_axis(index=['Loss Driver', 'Decision Variable']) - ds_model._missing = {('missing_cmp', 'my_DV')} + ds_model.missing = {('missing_cmp', 'my_DV')} medians = ds_model._calc_median_consequence(eco_qnt) - assert len(medians) == 1 and 'my_DV' in medians + assert len(medians) == 1 + assert 'my_DV' in medians pd.testing.assert_frame_equal( medians['my_DV'], pd.DataFrame( @@ -839,14 +869,15 @@ def test__calc_median_consequence_no_locs(self, assessment_instance): }, index=pd.MultiIndex.from_tuples([('cmp.A', 'my_DV')]), ).rename_axis(index=['Loss Driver', 'Decision Variable']) - with pytest.raises(ValueError) as record: + with pytest.raises( + ValueError, + match='Loss Distribution of type multilinear_CDF not supported.', + ): ds_model._calc_median_consequence(eco_qnt) - assert 'Loss Distribution of type multilinear_CDF not supported.' in str( - record.value - ) - - def test__calc_median_consequence_locs(self, assessment_instance): + def test__calc_median_consequence_locs( + self, assessment_instance: Assessment + ) -> None: # Test the method when the eco_qnt dataframe's columns contain # `loc` information. 
@@ -863,7 +894,7 @@ def test__calc_median_consequence_locs(self, assessment_instance): # is_for_LF_model represents a component->consequence pair # that is intended for processing by the loss function model # and should be ignored by the damage state model. - ds_model._loss_map = pd.DataFrame( + ds_model.loss_map = pd.DataFrame( { 'Repair': ['cmp.A'], }, @@ -887,9 +918,10 @@ def test__calc_median_consequence_locs(self, assessment_instance): }, index=pd.MultiIndex.from_tuples([('cmp.A', 'my_DV')]), ).rename_axis(index=['Loss Driver', 'Decision Variable']) - ds_model._missing = set() + ds_model.missing = set() medians = ds_model._calc_median_consequence(eco_qnt) - assert len(medians) == 1 and 'my_DV' in medians + assert len(medians) == 1 + assert 'my_DV' in medians pd.testing.assert_frame_equal( medians['my_DV'], pd.DataFrame( @@ -901,8 +933,9 @@ def test__calc_median_consequence_locs(self, assessment_instance): class TestRepairModel_LF(TestRepairModel_Base): - - def test__convert_loss_parameter_units(self, assessment_instance): + def test_convert_loss_parameter_units( + self, assessment_instance: Assessment + ) -> None: lf_model = RepairModel_LF(assessment_instance) lf_model.loss_params = pd.DataFrame( { @@ -916,7 +949,7 @@ def test__convert_loss_parameter_units(self, assessment_instance): index=pd.MultiIndex.from_tuples([('cmpA', 'Cost'), ('cmpB', 'Cost')]), ) - lf_model._convert_loss_parameter_units() + lf_model.convert_loss_parameter_units() pd.testing.assert_frame_equal( lf_model.loss_params, @@ -935,8 +968,7 @@ def test__convert_loss_parameter_units(self, assessment_instance): ), ) - def test__calc_median_consequence(self, assessment_instance): - + def test__calc_median_consequence(self, assessment_instance: Assessment) -> None: lf_model = RepairModel_LF(assessment_instance) performance_group = pd.DataFrame( @@ -974,25 +1006,26 @@ def test__calc_median_consequence(self, assessment_instance): ) # test small interpolation domain warning demand_dict = {'PFA-1-1': 
np.array((1.00, 2.00, 1e3))} - with pytest.raises(ValueError) as record: + with pytest.raises( + ValueError, + match=re.escape( + 'Loss function interpolation for consequence ' + '`cmp.A-dv.A` has failed. Ensure a sufficient ' + 'interpolation domain for the X values ' + '(those after the `|` symbol) and verify ' + 'the X-value and Y-value lengths match.' + ), + ): lf_model._calc_median_consequence( performance_group, loss_map, required_edps, demand_dict, cmp_sample ) - assert ( - 'Loss function interpolation for consequence ' - '`cmp.A-dv.A` has failed. Ensure a sufficient ' - 'interpolation domain for the X values ' - '(those after the `|` symbol) and verify ' - 'the X-value and Y-value lengths match.' - ) in str(record.value) - - def test__create_DV_RVs(self, assessment_instance): + def test__create_DV_RVs(self, assessment_instance: Assessment) -> None: assessment_instance.options.rho_cost_time = 0.50 lf_model = RepairModel_LF(assessment_instance) lf_model.decision_variables = ('Cost', 'Time') - lf_model._missing = set() - lf_model._loss_map = pd.DataFrame( + lf_model.missing = set() + lf_model.loss_map = pd.DataFrame( { 'Repair': ['cmp.A', 'cmp.B'], }, @@ -1028,6 +1061,7 @@ def test__create_DV_RVs(self, assessment_instance): names=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block'], ) rv_reg = lf_model._create_DV_RVs(cases) + assert rv_reg is not None for key in ( 'Cost-cmp.A-cmp.A-0-1-0-1', 'Time-cmp.A-cmp.A-0-1-0-1', @@ -1053,14 +1087,15 @@ def test__create_DV_RVs(self, assessment_instance): ) assert len(rv_reg.RV_set) == 1 - def test__create_DV_RVs_no_rv_case(self, assessment_instance): - + def test__create_DV_RVs_no_rv_case( + self, assessment_instance: Assessment + ) -> None: # Special case where there is no need for RVs lf_model = RepairModel_LF(assessment_instance) lf_model.decision_variables = ('Cost', 'Time') - lf_model._missing = set() - lf_model._loss_map = pd.DataFrame( + lf_model.missing = set() + lf_model.loss_map = pd.DataFrame( { 'Repair': 
['cmp.B'], }, @@ -1091,16 +1126,16 @@ def test__create_DV_RVs_no_rv_case(self, assessment_instance): assert rv_reg is None -def test__prep_constant_median_DV(): +def test__prep_constant_median_DV() -> None: median = 10.00 - constant_median_DV = model.loss_model._prep_constant_median_DV(median) - assert constant_median_DV() == median + constant_median_dv = model.loss_model._prep_constant_median_DV(median) + assert constant_median_dv() == median values = (1.0, 2.0, 3.0, 4.0, 5.0) for value in values: - assert constant_median_DV(value) == 10.00 + assert constant_median_dv(value) == 10.00 -def test__prep_bounded_multilinear_median_DV(): +def test__prep_bounded_multilinear_median_DV() -> None: medians = np.array((1.00, 2.00, 3.00, 4.00, 5.00)) quantities = np.array((0.00, 1.00, 2.00, 3.00, 4.00)) f = model.loss_model._prep_bounded_multilinear_median_DV(medians, quantities) @@ -1129,12 +1164,18 @@ def test__prep_bounded_multilinear_median_DV(): expected_list = [3.5, 4.5] assert np.allclose(result_list, expected_list) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'A bounded linear median Decision Variable function ' + 'called without specifying the quantity ' + 'of damaged components' + ), + ): f(None) -def test__is_for_lf_model(): - +def test__is_for_lf_model() -> None: positive_case = pd.DataFrame( { ('LossFunction', 'Theta_0'): [0.5], @@ -1153,8 +1194,7 @@ def test__is_for_lf_model(): assert _is_for_lf_model(negative_case) is False -def test__is_for_ds_model(): - +def test__is_for_ds_model() -> None: positive_case = pd.DataFrame( { ('DS1', 'Theta_0'): [0.50], diff --git a/pelicun/tests/basic/test_model.py b/pelicun/tests/basic/test_model.py index 5c12de685..af878f089 100644 --- a/pelicun/tests/basic/test_model.py +++ b/pelicun/tests/basic/test_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,24 +37,22 @@ # 
Adam Zsarnóczay # John Vouvakis Manousakis -""" -This file defines a class used by the model unit tests. -""" +"""This file defines a class used by the model unit tests.""" from __future__ import annotations + from copy import deepcopy +from typing import Callable + import pytest -from pelicun import assessment -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc +from pelicun import assessment class TestModelModule: @pytest.fixture - def assessment_factory(self): - def create_instance(verbose): + def assessment_factory(self) -> Callable: + def create_instance(*, verbose: bool) -> assessment.Assessment: x = assessment.Assessment() x.log.verbose = verbose return x @@ -63,5 +60,5 @@ def create_instance(verbose): return create_instance @pytest.fixture(params=[True, False]) - def assessment_instance(self, request, assessment_factory): - return deepcopy(assessment_factory(request.param)) + def assessment_instance(self, request, assessment_factory) -> None: # noqa: ANN001 + return deepcopy(assessment_factory(verbose=request.param)) diff --git a/pelicun/tests/basic/test_pelicun_model.py b/pelicun/tests/basic/test_pelicun_model.py index 4b43ce053..6b37fd77d 100644 --- a/pelicun/tests/basic/test_pelicun_model.py +++ b/pelicun/tests/basic/test_pelicun_model.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -38,33 +37,34 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -These are unit and integration tests on the PelicunModel class. 
-""" +"""These are unit and integration tests on the PelicunModel class.""" from __future__ import annotations + from copy import deepcopy -import pytest +from typing import TYPE_CHECKING + import numpy as np import pandas as pd +import pytest + from pelicun import model from pelicun.tests.basic.test_model import TestModelModule - -# pylint: disable=missing-function-docstring -# pylint: disable=missing-class-docstring -# pylint: disable=missing-return-doc,missing-return-type-doc +if TYPE_CHECKING: + from pelicun.assessment import Assessment + from pelicun.model.pelicun_model import PelicunModel class TestPelicunModel(TestModelModule): @pytest.fixture - def pelicun_model(self, assessment_instance): + def pelicun_model(self, assessment_instance: Assessment) -> PelicunModel: return deepcopy(model.PelicunModel(assessment_instance)) - def test_init(self, pelicun_model): + def test_init(self, pelicun_model: PelicunModel) -> None: assert pelicun_model.log - def test__convert_marginal_params(self, pelicun_model): + def test__convert_marginal_params(self, pelicun_model: PelicunModel) -> None: # one row, only Theta_0, no conversion marginal_params = pd.DataFrame( [['1.0']], @@ -206,7 +206,7 @@ def test__convert_marginal_params(self, pelicun_model): expected_df, res, check_index_type=False, check_column_type=False ) - def test_query_error_setup(self, pelicun_model): + def test_query_error_setup(self, pelicun_model: PelicunModel) -> None: assert ( pelicun_model.query_error_setup( 'Loss/ReplacementThreshold/RaiseOnUnknownKeys' diff --git a/pelicun/tests/basic/test_uq.py b/pelicun/tests/basic/test_uq.py index 7dfd1cceb..f75c56969 100644 --- a/pelicun/tests/basic/test_uq.py +++ b/pelicun/tests/basic/test_uq.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -47,16 +46,22 @@ """ from __future__ import annotations + +import math +import re import warnings -import 
pytest + import numpy as np -from scipy.stats import norm # type: ignore -from scipy.stats import lognorm # type: ignore -from scipy.stats import weibull_min # type: ignore -from pelicun import uq -from pelicun.tests.util import import_pickle -from pelicun.tests.util import export_pickle +import pytest +from scipy.stats import ( + lognorm, # type: ignore + norm, # type: ignore + weibull_min, # type: ignore +) +from pelicun import uq +from pelicun.base import ensure_value +from pelicun.tests.util import export_pickle, import_pickle # The tests maintain the order of definitions of the `uq.py` file. @@ -69,54 +74,65 @@ # The following tests verify the functions of the module. -def test_scale_distribution(): +def test_scale_distribution() -> None: # used in all cases theta = np.array((-1.00, 1.00)) trunc = np.array((-2.00, 2.00)) # case 1: # normal distribution, factor of two - res = uq.scale_distribution(2.00, 'normal', theta, trunc) - assert np.allclose(res[0], np.array((-2.00, 1.00))) # theta_new - assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits + theta_new, truncation_limits = uq.scale_distribution( + 2.00, 'normal', theta, trunc + ) + assert truncation_limits is not None + assert np.allclose(theta_new, np.array((-2.00, 1.00))) + assert np.allclose(truncation_limits, np.array((-4.00, 4.00))) # case 2: # normal_std distribution, factor of two res = uq.scale_distribution(2.00, 'normal_std', theta, trunc) + assert res[1] is not None assert np.allclose(res[0], np.array((-2.00, 2.00))) # theta_new assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits # case 3: # normal_cov distribution, factor of two res = uq.scale_distribution(2.00, 'normal_cov', theta, trunc) + assert res[1] is not None assert np.allclose(res[0], np.array((-2.00, 1.00))) # theta_new assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits # case 4: # lognormal distribution, factor of two res = uq.scale_distribution(2.00, 'lognormal', theta, 
trunc) + assert res[1] is not None assert np.allclose(res[0], np.array((-2.00, 1.00))) # theta_new assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits # case 5: # uniform distribution, factor of two res = uq.scale_distribution(2.00, 'uniform', theta, trunc) + assert res[1] is not None assert np.allclose(res[0], np.array((-2.00, 2.00))) # theta_new assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits # case 6: unsupported distribution - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Unsupported distribution: benktander-weibull' + ): uq.scale_distribution(0.50, 'benktander-weibull', np.array((1.00, 10.00))) -def test_mvn_orthotope_density(): +def test_mvn_orthotope_density() -> None: # case 1: # zero-width slice should result in a value of zero. mu_val = 0.00 cov_val = 1.00 lower_val = -1.00 upper_val = -1.00 - res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val) + res = uq.mvn_orthotope_density( + mu_val, np.atleast_2d([cov_val]), lower_val, upper_val + ) assert np.allclose(res, np.array((0.00, 2.00e-16))) # case 2: @@ -125,7 +141,9 @@ def test_mvn_orthotope_density(): cov_val = 1.00 lower_val = np.nan upper_val = 0.00 - res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val) + res = uq.mvn_orthotope_density( + mu_val, np.atleast_2d([cov_val]), lower_val, upper_val + ) assert np.allclose(res, np.array((0.50, 2.00e-16))) # case 3: @@ -134,7 +152,9 @@ def test_mvn_orthotope_density(): cov_val = 1.00 lower_val = 0.00 upper_val = np.nan - res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val) + res = uq.mvn_orthotope_density( + mu_val, np.atleast_2d([cov_val]), lower_val, upper_val + ) assert np.allclose(res, np.array((0.50, 2.00e-16))) # case 4: @@ -167,13 +187,12 @@ def test_mvn_orthotope_density(): assert np.allclose(res, np.array((1.00 / 8.00, 2.00e-16))) -def test__get_theta(): - +def test__get_theta() -> None: # Evaluate uq._get_theta() for some valid 
inputs res = uq._get_theta( np.array(((1.00, 1.00), (1.00, 0.5), (0.00, 0.3), (1.50, 0.2))), np.array(((0.00, 1.00), (1.00, 0.5), (0.00, 0.3), (1.00, 0.2))), - ['normal', 'lognormal', 'normal_std', 'normal_cov'], + np.array(['normal', 'lognormal', 'normal_std', 'normal_cov']), ) # Check that the expected output is obtained for each distribution type @@ -184,11 +203,15 @@ def test__get_theta(): assert np.allclose(res, expected_res) # Check that it fails for invalid inputs - with pytest.raises(ValueError): - uq._get_theta(np.array((1.00,)), np.array((1.00,)), 'not_a_distribution') + with pytest.raises( + ValueError, match='Unsupported distribution: not_a_distribution' + ): + uq._get_theta( + np.array((1.00,)), np.array((1.00,)), np.array(['not_a_distribution']) + ) -def test__get_limit_probs(): +def test__get_limit_probs() -> None: # verify that it works for valid inputs res = uq._get_limit_probs( @@ -233,7 +256,9 @@ def test__get_limit_probs(): # verify that it fails for invalid inputs - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Unsupported distribution: not_a_distribution' + ): uq._get_limit_probs( np.array((1.00,)), 'not_a_distribution', @@ -241,7 +266,7 @@ def test__get_limit_probs(): ) -def test__get_std_samples(): +def test__get_std_samples() -> None: # test that it works with valid inputs # case 1: @@ -251,7 +276,7 @@ def test__get_std_samples(): tr_limits = np.array(((np.nan, np.nan),)) dist_list = np.array(('normal',)) res = uq._get_std_samples(samples, theta, tr_limits, dist_list) - assert np.allclose(res, np.array(((1.00, 2.00, 3.00)))) + assert np.allclose(res, np.array((1.00, 2.00, 3.00))) # case 2: # multivariate samples @@ -290,8 +315,9 @@ def test__get_std_samples(): ) # test that it fails for invalid inputs - - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Unsupported distribution: some_unsupported_distribution' + ): uq._get_std_samples( np.array(((1.00, 2.00, 3.00),)), np.array(((0.00, 
1.0),)), @@ -300,43 +326,50 @@ def test__get_std_samples(): ) -def test__get_std_corr_matrix(): +def test__get_std_corr_matrix() -> None: # test that it works with valid inputs # case 1: std_samples = np.array(((1.00,),)) res = uq._get_std_corr_matrix(std_samples) + assert res is not None assert np.allclose(res, np.array(((1.00,),))) # case 2: std_samples = np.array(((1.00, 0.00), (0.00, 1.00))) res = uq._get_std_corr_matrix(std_samples) + assert res is not None assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00)))) # case 3: std_samples = np.array(((1.00, 0.00), (0.00, -1.00))) res = uq._get_std_corr_matrix(std_samples) + assert res is not None assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00)))) # case 4: std_samples = np.array(((1.00, 1.00), (1.00, 1.00))) res = uq._get_std_corr_matrix(std_samples) + assert res is not None assert np.allclose(res, np.array(((1.00, 1.00), (1.00, 1.00)))) # case 5: std_samples = np.array(((1.00, 1e50), (-1.00, -1.00))) res = uq._get_std_corr_matrix(std_samples) + assert res is not None assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00)))) # test that it fails for invalid inputs for bad_item in (np.nan, np.inf, -np.inf): - with pytest.raises(ValueError): - x = np.array(((1.00, bad_item), (-1.00, -1.00))) + x = np.array(((1.00, bad_item), (-1.00, -1.00))) + with pytest.raises( + ValueError, match='std_samples array must not contain inf or NaN values' + ): uq._get_std_corr_matrix(x) -def test__mvn_scale(): +def test__mvn_scale() -> None: # case 1: np.random.seed(40) sample = np.random.normal(0.00, 1.00, size=(2, 5)).T @@ -352,7 +385,7 @@ def test__mvn_scale(): assert np.allclose(res, np.array((0.0, 0.0, 0.0, 0.0, 0.0))) -def test__neg_log_likelihood(): +def test__neg_log_likelihood() -> None: # Parameters not within the pre-defined bounds should yield a # large value to discourage the optimization algorithm from going # in that direction. 
@@ -367,9 +400,9 @@ def test__neg_log_likelihood(): (1.10, 0.30), ), ), - dist_list=['normal', 'normal'], - tr_limits=[None, None], - det_limits=[None, None], + dist_list=np.array(('normal', 'normal')), + tr_limits=np.array((np.nan, np.nan)), + det_limits=[np.array((np.nan, np.nan))], censored_count=0, enforce_bounds=True, ) @@ -380,17 +413,17 @@ def test__neg_log_likelihood(): res = uq._neg_log_likelihood( np.array((np.nan, 0.20)), np.array((1.00, 0.20)), - 0.00, - 20.00, + np.atleast_1d((0.00,)), + np.atleast_1d((20.00,)), np.array( ( (0.90, 0.10), (1.10, 0.30), ), ), - ['normal', 'normal'], - [-np.inf, np.inf], - [np.nan, np.nan], + np.array(('normal', 'normal')), + np.array((-np.inf, np.inf)), + [np.array((np.nan, np.nan))], 0, enforce_bounds=False, ) @@ -398,7 +431,7 @@ def test__neg_log_likelihood(): assert res == 1e10 -def test_fit_distribution_to_sample_univariate(): +def test_fit_distribution_to_sample_univariate() -> None: # a single value in the sample sample_vec = np.array((1.00,)) res = uq.fit_distribution_to_sample(sample_vec, 'normal') @@ -481,10 +514,10 @@ def test_fit_distribution_to_sample_univariate(): usable_sample, 'normal_cov', censored_count=c_count, - detection_limits=[c_lower, c_upper], + detection_limits=(c_lower, c_upper), ) compare_a = ( - np.array(((1.13825975, 0.46686491))), + np.array((1.13825975, 0.46686491)), np.array( ((1.00,)), ), @@ -496,10 +529,10 @@ def test_fit_distribution_to_sample_univariate(): usable_sample, 'normal_std', censored_count=c_count, - detection_limits=[c_lower, c_upper], + detection_limits=(c_lower, c_upper), ) compare_a = ( - np.array(((1.13825975, 0.53141375))), + np.array((1.13825975, 0.53141375)), np.array( ((1.00,)), ), @@ -520,10 +553,10 @@ def test_fit_distribution_to_sample_univariate(): usable_sample, 'normal_cov', censored_count=c_count, - detection_limits=[c_lower, c_upper], + detection_limits=(c_lower, c_upper), ) compare_b = ( - np.array(((-1.68598848, 1.75096914))), + np.array((-1.68598848, 
1.75096914)), np.array( ((1.00,)), ), @@ -544,10 +577,10 @@ def test_fit_distribution_to_sample_univariate(): usable_sample, 'normal_cov', censored_count=c_count, - detection_limits=[c_lower, c_upper], + detection_limits=(c_lower, c_upper), ) compare_c = ( - np.array(((1.68598845, 1.75096921))), + np.array((1.68598845, 1.75096921)), np.array( ((1.00,)), ), @@ -565,9 +598,12 @@ def test_fit_distribution_to_sample_univariate(): sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape( (1, -1) ) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='One or more sample values lie outside of the specified truncation limits.', + ): res = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) # truncated data, only lower, expect failure @@ -576,9 +612,15 @@ def test_fit_distribution_to_sample_univariate(): sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape( (1, -1) ) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'One or more sample values lie ' + 'outside of the specified truncation limits.' + ), + ): res = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) # truncated data, only upper, expect failure @@ -587,9 +629,15 @@ def test_fit_distribution_to_sample_univariate(): sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape( (1, -1) ) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'One or more sample values lie ' + 'outside of the specified truncation limits.' 
+ ), + ): res = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) # truncated data, lower and upper @@ -598,10 +646,10 @@ def test_fit_distribution_to_sample_univariate(): t_upper = +4.50 sample_vec = np.array((0.00, 1.00, 2.00, 3.00, 4.00)).reshape((1, -1)) res_a = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) compare_a = ( - np.array(((1.99999973, 2.2639968))), + np.array((1.99999973, 2.2639968)), np.array( ((1.00,)), ), @@ -617,9 +665,9 @@ def test_fit_distribution_to_sample_univariate(): (1, -1) ) res_b = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) - compare_b = (np.array(((-0.09587816, 21.95601487))), np.array(((1.00,)))) + compare_b = (np.array((-0.09587816, 21.95601487)), np.array((1.00,))) assert np.allclose(res_b[0], compare_b[0]) assert np.allclose(res_b[1], compare_b[1]) @@ -631,10 +679,10 @@ def test_fit_distribution_to_sample_univariate(): (1, -1) ) res_c = uq.fit_distribution_to_sample( - sample_vec, 'normal_cov', truncation_limits=[t_lower, t_upper] + sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper) ) compare_c = ( - np.array(((0.09587811, 21.95602574))), + np.array((0.09587811, 21.95602574)), np.array( ((1.00,)), ), @@ -647,7 +695,7 @@ def test_fit_distribution_to_sample_univariate(): assert np.isclose(res_b[0][0, 1], res_c[0][0, 1]) -def test_fit_distribution_to_sample_multivariate(): +def test_fit_distribution_to_sample_multivariate() -> None: # uncorrelated, normal np.random.seed(40) sample = np.random.multivariate_normal( @@ -687,8 +735,8 @@ def test_fit_distribution_to_sample_multivariate(): res = uq.fit_distribution_to_sample( sample, ['normal_cov', 'normal_cov'], - 
truncation_limits=np.array((-5.00, 6.00)), - detection_limits=np.array((0.20, 1.80)), + truncation_limits=(-5.00, 6.00), + detection_limits=(0.20, 1.80), ) compare = ( np.array(((1.00833201, 1.0012552), (1.00828936, 0.99477853))), @@ -701,12 +749,12 @@ def test_fit_distribution_to_sample_multivariate(): np.random.seed(40) sample = np.full( (2, 10), - 3.14, + 123.00, ) np.random.seed(40) res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov']) compare = ( - np.array(((3.14, 1.0e-6), (3.14, 1.0e-6))), + np.array(((123.00, 1.0e-6), (123.00, 1.0e-6))), np.array(((1.00, 0.00), (0.00, 1.00))), ) assert np.allclose(res[0], compare[0]) @@ -721,7 +769,7 @@ def test_fit_distribution_to_sample_multivariate(): ) np.random.seed(40) res = uq.fit_distribution_to_sample( - sample, ['lognormal', 'lognormal'], detection_limits=np.array((1e-8, 5.00)) + sample, ['lognormal', 'lognormal'], detection_limits=(1e-8, 5.00) ) compare = ( np.array(((4.60517598e00, 2.18581908e-04), (4.60517592e00, 2.16575944e-04))), @@ -735,7 +783,7 @@ def test_fit_distribution_to_sample_multivariate(): np.random.seed(40) sample = np.full( (1, 10), - 3.14, + math.pi, ) np.random.seed(40) with pytest.raises(IndexError): @@ -780,14 +828,16 @@ def test_fit_distribution_to_sample_multivariate(): sample = np.concatenate( (np.random.normal(0.00, 1.00, size=100000), np.array((np.inf,))) ) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='Conversion to standard normal space was unsuccessful' + ): uq.fit_distribution_to_sample(sample, ['normal_cov']) -def test_fit_distribution_to_percentiles(): +def test_fit_distribution_to_percentiles() -> None: # normal, mean of 20 and standard deviation of 10 - percentiles = np.linspace(0.01, 0.99, num=10000) - values = norm.ppf(percentiles, loc=20, scale=10) + percentiles = np.linspace(0.01, 0.99, num=10000).tolist() + values = norm.ppf(percentiles, loc=20, scale=10).tolist() res = uq.fit_distribution_to_percentiles( values, percentiles, 
['normal', 'lognormal'] ) @@ -795,7 +845,7 @@ def test_fit_distribution_to_percentiles(): assert np.allclose(res[1], np.array((20.00, 10.00))) # lognormal, median of 20 and beta of 0.4 - ln_values = lognorm.ppf(percentiles, s=0.40, scale=20.00) + ln_values = lognorm.ppf(percentiles, s=0.40, scale=20.00).tolist() res = uq.fit_distribution_to_percentiles( ln_values, percentiles, ['normal', 'lognormal'] ) @@ -803,17 +853,19 @@ def test_fit_distribution_to_percentiles(): assert np.allclose(res[1], np.array((20.0, 0.40))) # unrecognized distribution family - percentiles = np.linspace(0.01, 0.99, num=10000) - values = norm.ppf(percentiles, loc=20, scale=10) - with pytest.raises(ValueError): + percentiles = np.linspace(0.01, 0.99, num=10000).tolist() + values = norm.ppf(percentiles, loc=20, scale=10).tolist() + with pytest.raises( + ValueError, match='Distribution family not recognized: birnbaum-saunders' + ): uq.fit_distribution_to_percentiles( values, percentiles, ['lognormal', 'birnbaum-saunders'] ) -def test__OLS_percentiles(): +def test__OLS_percentiles() -> None: # normal: negative standard deviation - params = np.array((2.50, -0.10)) + params = (2.50, -0.10) perc = np.linspace(1e-2, 1.00 - 1e-2, num=5) values = norm.ppf(perc, loc=20, scale=10) family = 'normal' @@ -821,7 +873,7 @@ def test__OLS_percentiles(): assert res == 10000000000.0 # lognormal: negative median - params = np.array((-1.00, 0.40)) + params = (-1.00, 0.40) perc = np.linspace(1e-2, 1.00 - 1e-2, num=5) values = lognorm.ppf(perc, s=0.40, scale=20.00) family = 'lognormal' @@ -838,7 +890,7 @@ def test__OLS_percentiles(): # The following tests verify the methods of the objects of the module. 
-def test_NormalRandomVariable(): +def test_NormalRandomVariable() -> None: rv = uq.NormalRandomVariable('rv_name', theta=np.array((0.00, 1.00))) assert rv.name == 'rv_name' np.testing.assert_allclose(rv.theta, np.array((0.00, 1.00))) @@ -847,12 +899,10 @@ def test_NormalRandomVariable(): assert rv.sample_DF is None # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - # thanks pylint, we are aware of this. - rv.xyz = 123 + rv.xyz = 123 # type: ignore -def test_Normal_STD(): +def test_Normal_STD() -> None: rv = uq.Normal_STD('rv_name', theta=np.array((0.00, 1.00))) assert rv.name == 'rv_name' np.testing.assert_allclose(rv.theta, np.array((0.00, 1.00))) @@ -860,11 +910,13 @@ def test_Normal_STD(): assert rv.RV_set is None assert rv.sample_DF is None with pytest.raises(AttributeError): - rv.xyz = 123 + rv.xyz = 123 # type: ignore -def test_Normal_COV(): - with pytest.raises(ValueError): +def test_Normal_COV() -> None: + with pytest.raises( + ValueError, match='The mean of Normal_COV RVs cannot be zero.' 
+ ): rv = uq.Normal_COV('rv_name', theta=np.array((0.00, 1.00))) rv = uq.Normal_COV('rv_name', theta=np.array((2.00, 1.00))) assert rv.name == 'rv_name' @@ -873,29 +925,29 @@ def test_Normal_COV(): assert rv.RV_set is None assert rv.sample_DF is None with pytest.raises(AttributeError): - rv.xyz = 123 + rv.xyz = 123 # type: ignore -def test_NormalRandomVariable_cdf(): +def test_NormalRandomVariable_cdf() -> None: # test CDF method rv = uq.NormalRandomVariable( 'test_rv', - theta=(1.0, 1.0), + theta=np.array((1.0, 1.0)), truncation_limits=np.array((0.00, np.nan)), ) # evaluate CDF at different points - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) # assert that CDF values are correct assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5) # repeat without truncation limits - rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 1.0)) + rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 1.0))) # evaluate CDF at different points - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) # assert that CDF values are correct @@ -904,73 +956,80 @@ def test_NormalRandomVariable_cdf(): ) -def test_Normal_STD_cdf(): +def test_Normal_STD_cdf() -> None: rv = uq.Normal_STD( 'test_rv', - theta=(1.0, 1.0), + theta=np.array((1.0, 1.0)), truncation_limits=np.array((0.00, np.nan)), ) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5) -def test_Normal_COV_cdf(): +def test_Normal_COV_cdf() -> None: rv = uq.Normal_COV( 'test_rv', - theta=(1.0, 1.0), + theta=np.array((1.0, 1.0)), truncation_limits=np.array((0.00, np.nan)), ) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5) -def test_NormalRandomVariable_inverse_transform(): +def 
test_NormalRandomVariable_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) - rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 0.5)) + rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 0.5))) rv.uni_sample = samples rv.inverse_transform_sampling() inverse_transform = rv.sample + assert inverse_transform is not None assert np.allclose( inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5 ) - rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 0.5)) - with pytest.raises(ValueError): + rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 0.5))) + with pytest.raises(ValueError, match='No available uniform sample.'): rv.inverse_transform_sampling() # with truncation limits rv = uq.NormalRandomVariable( - 'test_rv', theta=(1.0, 0.5), truncation_limits=(np.nan, 1.20) + 'test_rv', + theta=np.array((1.0, 0.5)), + truncation_limits=np.array((np.nan, 1.20)), ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.24508018, 0.43936, 0.57313359)), rtol=1e-5 ) rv = uq.NormalRandomVariable( - 'test_rv', theta=(1.0, 0.5), truncation_limits=(0.80, np.nan) + 'test_rv', + theta=np.array((1.0, 0.5)), + truncation_limits=np.array((0.80, np.nan)), ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.8863824, 0.96947866, 1.0517347)), rtol=1e-5 ) rv = uq.NormalRandomVariable( - 'test_rv', theta=(1.0, 0.5), truncation_limits=(0.80, 1.20) + 'test_rv', + theta=np.array((1.0, 0.5)), + truncation_limits=np.array((0.80, 1.20)), ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.84155378, 0.88203946, 0.92176503)), rtol=1e-5 ) @@ -981,47 
+1040,56 @@ def test_NormalRandomVariable_inverse_transform(): # normal with problematic truncation limits rv = uq.NormalRandomVariable( - 'test_rv', theta=(1.0, 0.5), truncation_limits=(1e8, 2e8) + 'test_rv', theta=np.array((1.0, 0.5)), truncation_limits=np.array((1e8, 2e8)) ) rv.uni_sample = samples - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'The probability mass within the truncation ' + 'limits is too small and the truncated ' + 'distribution cannot be sampled with ' + 'sufficiently high accuracy. This is most probably ' + 'due to incorrect truncation limits set ' + 'for the distribution.' + ), + ): rv.inverse_transform_sampling() -def test_Normal_STD_inverse_transform(): +def test_Normal_STD_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) - rv = uq.Normal_STD('test_rv', theta=(1.0, 0.5)) + rv = uq.Normal_STD('test_rv', theta=np.array((1.0, 0.5))) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5 ) -def test_Normal_COV_inverse_transform(): +def test_Normal_COV_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) - rv = uq.Normal_COV('test_rv', theta=(1.0, 0.5)) + rv = uq.Normal_COV('test_rv', theta=np.array((1.0, 0.5))) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5 ) -def test_LogNormalRandomVariable_cdf(): +def test_LogNormalRandomVariable_cdf() -> None: # lower truncation rv = uq.LogNormalRandomVariable( 'test_rv', - theta=(1.0, 1.0), + theta=np.array((1.0, 1.0)), truncation_limits=np.array((0.10, np.nan)), ) # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: 
disable=assigning-non-slot - rv.xyz = 123 - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + rv.xyz = 123 # type: ignore + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose( cdf, (0.0, 0.0, 0.23597085, 0.49461712, 0.75326339), rtol=1e-5 @@ -1030,29 +1098,29 @@ def test_LogNormalRandomVariable_cdf(): # upper truncation rv = uq.LogNormalRandomVariable( 'test_rv', - theta=(1.0, 1.0), + theta=np.array((1.0, 1.0)), truncation_limits=np.array((np.nan, 5.00)), ) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose( cdf, (0.00, 0.00, 0.25797755, 0.52840734, 0.79883714), rtol=1e-5 ) # no truncation - rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 1.0)) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 1.0))) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.2441086, 0.5, 0.7558914), rtol=1e-5) -def test_LogNormalRandomVariable_inverse_transform(): +def test_LogNormalRandomVariable_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) - rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 0.5)) + rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 0.5))) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.52688352, 0.65651442, 0.76935694)), rtol=1e-5 @@ -1064,12 +1132,12 @@ def test_LogNormalRandomVariable_inverse_transform(): rv = uq.LogNormalRandomVariable( 'test_rv', - theta=(1.0, 0.5), + theta=np.array((1.0, 0.5)), truncation_limits=np.array((0.50, np.nan)), ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.62614292, 0.73192471, 0.83365823)), rtol=1e-5 ) @@ -1079,85 +1147,84 @@ def 
test_LogNormalRandomVariable_inverse_transform(): # # lognormal without values to sample from - rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 0.5)) - with pytest.raises(ValueError): + rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 0.5))) + with pytest.raises(ValueError, match='No available uniform sample.'): rv.inverse_transform_sampling() -def test_UniformRandomVariable_cdf(): +def test_UniformRandomVariable_cdf() -> None: # uniform, both theta values - rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0)) + rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0))) # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - rv.xyz = 123 - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + rv.xyz = 123 # type: ignore + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.5, 1.0, 1.0), rtol=1e-5) with warnings.catch_warnings(): warnings.simplefilter('ignore') # uniform, only upper theta value ( -inf implied ) - rv = uq.UniformRandomVariable('test_rv', theta=(np.nan, 100.00)) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + rv = uq.UniformRandomVariable('test_rv', theta=np.array((np.nan, 100.00))) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.all(np.isnan(cdf)) # uniform, only lower theta value ( +inf implied ) - rv = uq.UniformRandomVariable('test_rv', theta=(0.00, np.nan)) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.00, np.nan))) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.0, 0.0, 0.0), rtol=1e-5) # uniform, with truncation limits rv = uq.UniformRandomVariable( 'test_rv', - theta=(0.0, 10.0), + theta=np.array((0.0, 10.0)), truncation_limits=np.array((0.00, 1.00)), ) - x = (-1.0, 0.0, 0.5, 1.0, 2.0) + x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0)) cdf = rv.cdf(x) assert np.allclose(cdf, (0.0, 0.0, 0.5, 1.0, 1.0), 
rtol=1e-5) -def test_UniformRandomVariable_inverse_transform(): - rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0)) +def test_UniformRandomVariable_inverse_transform() -> None: + rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0))) samples = np.array((0.10, 0.20, 0.30)) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose(inverse_transform, samples, rtol=1e-5) # # uniform with unspecified bounds # - rv = uq.UniformRandomVariable('test_rv', theta=(np.nan, 1.0)) + rv = uq.UniformRandomVariable('test_rv', theta=np.array((np.nan, 1.0))) samples = np.array((0.10, 0.20, 0.30)) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.all(np.isnan(inverse_transform)) - rv = uq.UniformRandomVariable('test_rv', theta=(0.00, np.nan)) + rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.00, np.nan))) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.all(np.isinf(inverse_transform)) rv = uq.UniformRandomVariable( 'test_rv', - theta=(0.00, 1.00), + theta=np.array((0.00, 1.00)), truncation_limits=np.array((0.20, 0.80)), ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose(inverse_transform, np.array((0.26, 0.32, 0.38)), rtol=1e-5) # sample as a pandas series, with a log() map rv.f_map = np.log - assert rv.sample_DF.to_dict() == { + assert ensure_value(rv.sample_DF).to_dict() == { 0: -1.3470736479666092, 1: -1.1394342831883646, 2: -0.9675840262617056, @@ -1168,12 +1235,12 @@ def test_UniformRandomVariable_inverse_transform(): # # uniform without values to sample from - rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0)) - with pytest.raises(ValueError): + rv = 
uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0))) + with pytest.raises(ValueError, match='No available uniform sample.'): rv.inverse_transform_sampling() -def test_WeibullRandomVariable(): +def test_WeibullRandomVariable() -> None: rv = uq.WeibullRandomVariable('rv_name', theta=np.array((1.5, 2.0))) assert rv.name == 'rv_name' np.testing.assert_allclose(rv.theta, np.array((1.5, 2.0))) @@ -1181,44 +1248,44 @@ def test_WeibullRandomVariable(): assert rv.RV_set is None assert rv.sample_DF is None with pytest.raises(AttributeError): - rv.xyz = 123 + rv.xyz = 123 # type: ignore -def test_WeibullRandomVariable_cdf(): +def test_WeibullRandomVariable_cdf() -> None: rv = uq.WeibullRandomVariable( 'test_rv', - theta=(1.5, 2.0), + theta=np.array((1.5, 2.0)), truncation_limits=np.array((0.5, 2.5)), ) - x = (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0) + x = np.array((0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0)) cdf = rv.cdf(x) expected_cdf = np.array([0.0, 0.0, 0.30463584, 0.63286108, 0.87169261, 1.0, 1.0]) assert np.allclose(cdf, expected_cdf, rtol=1e-5) - rv = uq.WeibullRandomVariable('test_rv', theta=(1.5, 2.0)) + rv = uq.WeibullRandomVariable('test_rv', theta=np.array((1.5, 2.0))) cdf = rv.cdf(x) expected_cdf_no_trunc = weibull_min.cdf(x, 2.0, scale=1.5) assert np.allclose(cdf, expected_cdf_no_trunc, rtol=1e-5) -def test_WeibullRandomVariable_inverse_transform(): +def test_WeibullRandomVariable_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) - rv = uq.WeibullRandomVariable('test_rv', theta=(1.5, 2.0)) + rv = uq.WeibullRandomVariable('test_rv', theta=np.array((1.5, 2.0))) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) expected_samples = weibull_min.ppf(samples, 2.0, scale=1.5) assert np.allclose(inverse_transform, expected_samples, rtol=1e-5) rv = uq.WeibullRandomVariable( - 'test_rv', theta=(1.5, 2.0), truncation_limits=(0.5, 2.5) + 'test_rv', theta=np.array((1.5, 2.0)), 
truncation_limits=np.array((0.5, 2.5)) ) rv.uni_sample = samples rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) truncated_samples = weibull_min.ppf( samples * ( @@ -1232,64 +1299,97 @@ def test_WeibullRandomVariable_inverse_transform(): assert np.allclose(inverse_transform, truncated_samples, rtol=1e-5) -def test_MultinomialRandomVariable(): +def test_MultinomialRandomVariable() -> None: # multinomial with invalid p values provided in the theta vector - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=re.escape( + 'The set of p values provided for a multinomial ' + 'distribution shall sum up to less than or equal to 1.0. ' + 'The provided values sum up to 43.0. ' + 'p = [ 0.2 0.7 0.1 42. ] .' + ), + ): uq.MultinomialRandomVariable( 'rv_invalid', np.array((0.20, 0.70, 0.10, 42.00)) ) -def test_MultilinearCDFRandomVariable(): +def test_MultilinearCDFRandomVariable() -> None: # multilinear CDF: cases that should fail x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (100.00, 0.20, 0.20, 0.80, 1.00) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='For multilinear CDF random variables, y_1 should be set to 0.00', + ): uq.MultilinearCDFRandomVariable('test_rv', theta=values) x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.20, 0.20, 0.80, 0.80) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='For multilinear CDF random variables, y_n should be set to 1.00', + ): uq.MultilinearCDFRandomVariable('test_rv', theta=values) x_values = (0.00, 3.00, 1.00, 2.00, 4.00) y_values = (0.00, 0.25, 0.50, 0.75, 1.00) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='For multilinear CDF random variables, Xs should be specified in ascending order', + ): 
uq.MultilinearCDFRandomVariable('test_rv', theta=values) x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.75, 0.50, 0.25, 1.00) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match='For multilinear CDF random variables, Ys should be specified in ascending order', + ): uq.MultilinearCDFRandomVariable('test_rv', theta=values) x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.50, 0.50, 0.50, 1.00) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'For multilinear CDF random variables, ' + 'Ys should be specified in strictly ascending order' + ), + ): uq.MultilinearCDFRandomVariable('test_rv', theta=values) x_values = (0.00, 2.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.20, 0.40, 0.50, 1.00) values = np.column_stack((x_values, y_values)) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, + match=( + 'For multilinear CDF random variables, ' + 'Xs should be specified in strictly ascending order' + ), + ): uq.MultilinearCDFRandomVariable('test_rv', theta=values) -def test_MultilinearCDFRandomVariable_cdf(): +def test_MultilinearCDFRandomVariable_cdf() -> None: x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.20, 0.30, 0.80, 1.00) values = np.column_stack((x_values, y_values)) rv = uq.MultilinearCDFRandomVariable('test_rv', theta=values) # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - rv.xyz = 123 - x = (-100.00, 0.00, 0.50, 1.00, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 100.00) + rv.xyz = 123 # type: ignore + x = np.array( + (-100.00, 0.00, 0.50, 1.00, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 100.00) + ) cdf = rv.cdf(x) assert np.allclose( @@ -1299,7 +1399,7 @@ def test_MultilinearCDFRandomVariable_cdf(): ) -def test_MultilinearCDFRandomVariable_inverse_transform(): +def 
test_MultilinearCDFRandomVariable_inverse_transform() -> None: x_values = (0.00, 1.00, 2.00, 3.00, 4.00) y_values = (0.00, 0.20, 0.30, 0.80, 1.00) values = np.column_stack((x_values, y_values)) @@ -1307,7 +1407,7 @@ def test_MultilinearCDFRandomVariable_inverse_transform(): rv.uni_sample = np.array((0.00, 0.1, 0.2, 0.5, 0.8, 0.9, 1.00)) rv.inverse_transform_sampling() - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.00, 0.50, 1.00, 2.40, 3.00, 3.50, 4.00)), @@ -1315,100 +1415,95 @@ def test_MultilinearCDFRandomVariable_inverse_transform(): ) -def test_EmpiricalRandomVariable_inverse_transform(): +def test_EmpiricalRandomVariable_inverse_transform() -> None: samples = np.array((0.10, 0.20, 0.30)) rv_empirical = uq.EmpiricalRandomVariable( - 'test_rv_empirical', raw_samples=(1.00, 2.00, 3.00, 4.00) + 'test_rv_empirical', theta=np.array((1.00, 2.00, 3.00, 4.00)) ) # confirm that creating an attribute on the fly is not allowed with pytest.raises(AttributeError): - # pylint: disable=assigning-non-slot - rv_empirical.xyz = 123 + rv_empirical.xyz = 123 # type: ignore samples = np.array((0.10, 0.50, 0.90)) rv_empirical.uni_sample = samples rv_empirical.inverse_transform_sampling() - inverse_transform = rv_empirical.sample + inverse_transform = ensure_value(rv_empirical.sample) assert np.allclose(inverse_transform, np.array((1.00, 3.00, 4.00)), rtol=1e-5) rv_coupled = uq.CoupledEmpiricalRandomVariable( 'test_rv_coupled', - raw_samples=np.array((1.00, 2.00, 3.00, 4.00)), + theta=np.array((1.00, 2.00, 3.00, 4.00)), ) rv_coupled.inverse_transform_sampling(sample_size=6) - inverse_transform = rv_coupled.sample + inverse_transform = ensure_value(rv_coupled.sample) assert np.allclose( inverse_transform, np.array((1.00, 2.00, 3.00, 4.00, 1.00, 2.00)), rtol=1e-5 ) -def test_DeterministicRandomVariable_inverse_transform(): +def test_DeterministicRandomVariable_inverse_transform() -> None: rv = 
uq.DeterministicRandomVariable('test_rv', theta=np.array((0.00,))) rv.inverse_transform_sampling(4) - inverse_transform = rv.sample + inverse_transform = ensure_value(rv.sample) assert np.allclose( inverse_transform, np.array((0.00, 0.00, 0.00, 0.00)), rtol=1e-5 ) -def test_RandomVariable_Set(): +def test_RandomVariable_Set() -> None: # a set of two random variables - rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0)) - rv_2 = uq.NormalRandomVariable('rv2', theta=(1.0, 1.0)) - rv_set = uq.RandomVariableSet( # noqa: F841 - 'test_set', (rv_1, rv_2), np.array(((1.0, 0.50), (0.50, 1.0))) + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0))) + rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 1.0))) + rv_set = uq.RandomVariableSet( + 'test_set', [rv_1, rv_2], np.array(((1.0, 0.50), (0.50, 1.0))) ) # size of the set assert rv_set.size == 2 # a set with only one random variable - rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0)) - rv_set = uq.RandomVariableSet( # noqa: F841 - 'test_set', (rv_1,), np.array(((1.0, 0.50),)) - ) - + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0))) + rv_set = uq.RandomVariableSet('test_set', [rv_1], np.array(((1.0, 0.50),))) -def test_RandomVariable_perfect_correlation(): +def test_RandomVariable_perfect_correlation() -> None: # Test that the `.anchor` attribute is propagated correctly - rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0)) - rv_2 = uq.NormalRandomVariable('rv2', theta=(1.0, 1.0), anchor=rv_1) + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0))) + rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 1.0)), anchor=rv_1) rv_1.uni_sample = np.random.random(size=10) assert np.all(rv_2.uni_sample == rv_1.uni_sample) - rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0)) - rv_2 = uq.NormalRandomVariable('rv2', theta=(1.0, 1.0)) + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0))) + rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 
1.0))) rv_1.uni_sample = np.random.random(size=10) assert rv_2.uni_sample is None -def test_RandomVariable_Set_apply_correlation(reset=False): +def test_RandomVariable_Set_apply_correlation(*, reset: bool = False) -> None: data_dir = ( 'pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation' ) - file_incr = 0 # correlated, uniform np.random.seed(40) - rv_1 = uq.UniformRandomVariable(name='rv1', theta=(-5.0, 5.0)) - rv_2 = uq.UniformRandomVariable(name='rv2', theta=(-5.0, 5.0)) + rv_1 = uq.UniformRandomVariable(name='rv1', theta=np.array((-5.0, 5.0))) + rv_2 = uq.UniformRandomVariable(name='rv2', theta=np.array((-5.0, 5.0))) rv_1.uni_sample = np.random.random(size=100) rv_2.uni_sample = np.random.random(size=100) rvs = uq.RandomVariableSet( - name='test_set', RV_list=[rv_1, rv_2], Rho=np.array(((1.0, 0.5), (0.5, 1.0))) + name='test_set', rv_list=[rv_1, rv_2], rho=np.array(((1.0, 0.5), (0.5, 1.0))) ) rvs.apply_correlation() - for rv in (rv_1, rv_2): - res = rv.uni_sample - file_incr += 1 + for i, rv in enumerate((rv_1, rv_2)): + res = ensure_value(rv.uni_sample) + file_incr = i + 1 filename = f'{data_dir}/test_{file_incr}.pcl' if reset: export_pickle(filename, res) @@ -1420,13 +1515,13 @@ def test_RandomVariable_Set_apply_correlation(reset=False): rv_1.inverse_transform_sampling() rv_2.inverse_transform_sampling() rvset_sample = rvs.sample - assert set(rvset_sample.keys()) == set(('rv1', 'rv2')) + assert set(rvset_sample.keys()) == {'rv1', 'rv2'} vals = list(rvset_sample.values()) assert np.all(vals[0] == rv_1.sample) assert np.all(vals[1] == rv_2.sample) -def test_RandomVariable_Set_apply_correlation_special(): +def test_RandomVariable_Set_apply_correlation_special() -> None: # This function tests the apply_correlation method of the # RandomVariableSet class when given special input conditions. 
# The first test checks that the method works when given a non @@ -1442,8 +1537,8 @@ def test_RandomVariable_Set_apply_correlation_special(): # non positive semidefinite correlation matrix rho = np.array(((1.00, 0.50), (0.50, -1.00))) - rv_1 = uq.NormalRandomVariable('rv1', theta=[5.0, 0.1]) - rv_2 = uq.NormalRandomVariable('rv2', theta=[5.0, 0.1]) + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((5.0, 0.1))) + rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((5.0, 0.1))) rv_1.uni_sample = np.random.random(size=100) rv_2.uni_sample = np.random.random(size=100) rv_set = uq.RandomVariableSet('rv_set', [rv_1, rv_2], rho) @@ -1451,8 +1546,8 @@ def test_RandomVariable_Set_apply_correlation_special(): # non full rank matrix rho = np.array(((0.00, 0.00), (0.0, 0.0))) - rv_1 = uq.NormalRandomVariable('rv1', theta=[5.0, 0.1]) - rv_2 = uq.NormalRandomVariable('rv2', theta=[5.0, 0.1]) + rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((5.0, 0.1))) + rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((5.0, 0.1))) rv_1.uni_sample = np.random.random(size=100) rv_2.uni_sample = np.random.random(size=100) rv_set = uq.RandomVariableSet('rv_set', [rv_1, rv_2], rho) @@ -1462,23 +1557,23 @@ def test_RandomVariable_Set_apply_correlation_special(): ) -def test_RandomVariable_Set_orthotope_density(reset=False): +def test_RandomVariable_Set_orthotope_density(*, reset: bool = False) -> None: data_dir = ( 'pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density' ) # create some random variables rv_1 = uq.Normal_COV( - 'rv1', theta=[5.0, 0.1], truncation_limits=np.array((np.nan, 10.0)) + 'rv1', theta=np.array((5.0, 0.1)), truncation_limits=np.array((np.nan, 10.0)) ) - rv_2 = uq.LogNormalRandomVariable('rv2', theta=[10.0, 0.2]) - rv_3 = uq.UniformRandomVariable('rv3', theta=[13.0, 17.0]) - rv_4 = uq.UniformRandomVariable('rv4', theta=[0.0, 1.0]) - rv_5 = uq.UniformRandomVariable('rv5', theta=[0.0, 1.0]) + rv_2 = uq.LogNormalRandomVariable('rv2', 
theta=np.array((10.0, 0.2))) + rv_3 = uq.UniformRandomVariable('rv3', theta=np.array((13.0, 17.0))) + rv_4 = uq.UniformRandomVariable('rv4', theta=np.array((0.0, 1.0))) + rv_5 = uq.UniformRandomVariable('rv5', theta=np.array((0.0, 1.0))) # create a random variable set rv_set = uq.RandomVariableSet( - 'rv_set', (rv_1, rv_2, rv_3, rv_4, rv_5), np.identity(5) + 'rv_set', [rv_1, rv_2, rv_3, rv_4, rv_5], np.identity(5) ) # define test cases @@ -1487,7 +1582,7 @@ def test_RandomVariable_Set_orthotope_density(reset=False): ( np.array([4.0, 9.0, 14.0, np.nan]), np.array([6.0, 11.0, 16.0, 0.80]), - ('rv1', 'rv2', 'rv3', 'rv4'), + ['rv1', 'rv2', 'rv3', 'rv4'], ), ( np.array([4.0, 9.0, 14.0, np.nan, 0.20]), @@ -1522,7 +1617,7 @@ def test_RandomVariable_Set_orthotope_density(reset=False): assert np.allclose(res, compare) -def test_RandomVariableRegistry_generate_sample(reset=False): +def test_RandomVariableRegistry_generate_sample(*, reset: bool = False) -> None: data_dir = ( 'pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample' ) @@ -1537,14 +1632,14 @@ def test_RandomVariableRegistry_generate_sample(reset=False): rng = np.random.default_rng(0) rv_registry_single = uq.RandomVariableRegistry(rng) # create the random variable and add it to the registry - RV = uq.NormalRandomVariable('x', theta=[1.0, 1.0]) - rv_registry_single.add_RV(RV) + rv = uq.NormalRandomVariable('x', theta=np.array((1.0, 1.0))) + rv_registry_single.add_RV(rv) # Generate a sample sample_size = 1000 rv_registry_single.generate_sample(sample_size, method) - res = rv_registry_single.RV_sample['x'] + res = ensure_value(rv_registry_single.RV_sample['x']) assert len(res) == sample_size file_incr += 1 @@ -1566,13 +1661,15 @@ def test_RandomVariableRegistry_generate_sample(reset=False): # create a random variable registry and add some random variables to it rng = np.random.default_rng(4) rv_registry = uq.RandomVariableRegistry(rng) - rv_1 = uq.Normal_COV('rv1', theta=[5.0, 0.1]) - rv_2 = 
uq.LogNormalRandomVariable('rv2', theta=[10.0, 0.2]) - rv_3 = uq.UniformRandomVariable('rv3', theta=[13.0, 17.0]) + rv_1 = uq.Normal_COV('rv1', theta=np.array((5.0, 0.1))) + rv_2 = uq.LogNormalRandomVariable('rv2', theta=np.array((10.0, 0.2))) + rv_3 = uq.UniformRandomVariable('rv3', theta=np.array((13.0, 17.0))) rv_registry.add_RV(rv_1) rv_registry.add_RV(rv_2) rv_registry.add_RV(rv_3) - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match='RV rv3 already exists in the registry.' + ): rv_registry.add_RV(rv_3) # create a random variable set and add it to the registry @@ -1582,8 +1679,8 @@ def test_RandomVariableRegistry_generate_sample(reset=False): rv_registry.add_RV_set(rv_set) # add some more random variables that are not part of the set - rv_4 = uq.Normal_COV('rv4', theta=[14.0, 0.30]) - rv_5 = uq.Normal_COV('rv5', theta=[15.0, 0.50]) + rv_4 = uq.Normal_COV('rv4', theta=np.array((14.0, 0.30))) + rv_5 = uq.Normal_COV('rv5', theta=np.array((15.0, 0.50))) rv_registry.add_RV(rv_4) rv_registry.add_RV(rv_5) @@ -1591,7 +1688,7 @@ def test_RandomVariableRegistry_generate_sample(reset=False): # verify that all samples have been generated as expected for rv_name in (f'rv{i + 1}' for i in range(5)): - res = rv_registry.RV_sample[rv_name] + res = ensure_value(rv_registry.RV_sample[rv_name]) file_incr += 1 filename = f'{data_dir}/test_{file_incr}.pcl' if reset: @@ -1600,17 +1697,19 @@ def test_RandomVariableRegistry_generate_sample(reset=False): assert np.allclose(res, compare) # obtain multiple RVs from the registry - rv_dictionary = rv_registry.RVs(('rv1', 'rv2')) + rv_dictionary = rv_registry.RVs(['rv1', 'rv2']) assert 'rv1' in rv_dictionary assert 'rv2' in rv_dictionary assert 'rv3' not in rv_dictionary -def test_rv_class_map(): +def test_rv_class_map() -> None: rv_class = uq.rv_class_map('normal_std') assert rv_class.__name__ == 'Normal_STD' - with pytest.raises(ValueError): + with pytest.raises( + ValueError, match=re.escape('Unsupported 
distribution: ') + ): uq.rv_class_map('') diff --git a/pelicun/tests/code_repetition_checker.py b/pelicun/tests/code_repetition_checker.py index 3bf66f4f6..d877e4b2a 100644 --- a/pelicun/tests/code_repetition_checker.py +++ b/pelicun/tests/code_repetition_checker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,23 +43,27 @@ """ from __future__ import annotations -from glob2 import glob # type: ignore -# pylint: disable=missing-any-param-doc +from pathlib import Path + +from glob2 import glob # type: ignore -def main(file): +def main(file: str) -> None: """ Identifies and displays repeated consecutive line blocks within a file, including their line numbers. - Args: - file: Path to the file to be checked for duplicates. + Parameters + ---------- + file: str + Path to the file to be checked for duplicates. + """ # file = 'tests/test_uq.py' group = 15 # find repeated blocks this many lines - with open(file, 'r', encoding='utf-8') as f: + with Path(file).open(encoding='utf-8') as f: contents = f.readlines() num_lines = len(contents) for i in range(0, num_lines, group): @@ -68,22 +71,22 @@ def main(file): for j in range(i + 1, num_lines): jlines = contents[j : j + group] if glines == jlines: - print(f'{i, j}: ') + print(f'{i, j}: ') # noqa: T201 for k in range(group): - print(f' {jlines[k]}', end='') - print() + print(f' {jlines[k]}', end='') # noqa: T201 + print() # noqa: T201 -def all_test_files(): +def all_test_files() -> None: """ Searches for all Python test files in the 'tests' directory and runs the main function to find and print repeated line blocks in each file. 
""" test_files = glob('tests/*.py') for file in test_files: - print() - print(file) - print() + print() # noqa: T201 + print(file) # noqa: T201 + print() # noqa: T201 main(file) diff --git a/pelicun/tests/dl_calculation/e1/test_e1.py b/pelicun/tests/dl_calculation/e1/test_e1.py index 08267d4c5..681e88aa8 100644 --- a/pelicun/tests/dl_calculation/e1/test_e1.py +++ b/pelicun/tests/dl_calculation/e1/test_e1.py @@ -1,30 +1,57 @@ -""" -DL Calculation Example 1 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 1.""" -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,9 +63,11 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_1(obtain_temp_dir): +def test_dl_calculation_1(obtain_temp_dir: str) -> None: + this_dir: str + temp_dir: str - this_dir, temp_dir = obtain_temp_dir + this_dir, temp_dir = obtain_temp_dir # type: ignore # Copy all input files to a temporary directory. # All outputs will also go there. 
@@ -60,13 +89,12 @@ def test_dl_calculation_1(obtain_temp_dir): demand_file='response.csv', config_path='8000-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e2/test_e2.py b/pelicun/tests/dl_calculation/e2/test_e2.py index 13accec25..dab04ab04 100644 --- a/pelicun/tests/dl_calculation/e2/test_e2.py +++ b/pelicun/tests/dl_calculation/e2/test_e2.py @@ -1,30 +1,59 @@ -""" -DL Calculation Example 2 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 2.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_2(obtain_temp_dir): - +def test_dl_calculation_2(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. 
@@ -57,13 +85,12 @@ def test_dl_calculation_2(obtain_temp_dir): demand_file='response.csv', config_path='1-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e3/test_e3.py b/pelicun/tests/dl_calculation/e3/test_e3.py index bfd3d39b4..3370abdf7 100644 --- a/pelicun/tests/dl_calculation/e3/test_e3.py +++ b/pelicun/tests/dl_calculation/e3/test_e3.py @@ -1,30 +1,59 @@ -""" -DL Calculation Example 3 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 3.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_3(obtain_temp_dir): - +def test_dl_calculation_3(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. 
@@ -55,13 +83,12 @@ def test_dl_calculation_3(obtain_temp_dir): demand_file='response.csv', config_path='170-AIM.json', output_path=None, - coupled_EDP=False, - realizations='100', + coupled_edp=False, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e4/test_e4.py b/pelicun/tests/dl_calculation/e4/test_e4.py index 2bdc0527f..f4cbd6278 100644 --- a/pelicun/tests/dl_calculation/e4/test_e4.py +++ b/pelicun/tests/dl_calculation/e4/test_e4.py @@ -1,30 +1,59 @@ -""" -DL Calculation Example 4 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 4.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_4(obtain_temp_dir): - +def test_dl_calculation_4(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. 
@@ -60,13 +88,12 @@ def test_dl_calculation_4(obtain_temp_dir): demand_file='response.csv', config_path='0-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e5/test_e5.py b/pelicun/tests/dl_calculation/e5/test_e5.py index 8d1fb47e3..ae1b71d30 100644 --- a/pelicun/tests/dl_calculation/e5/test_e5.py +++ b/pelicun/tests/dl_calculation/e5/test_e5.py @@ -1,30 +1,59 @@ -""" -DL Calculation Example 5 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 5.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_5(obtain_temp_dir): - +def test_dl_calculation_5(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. 
@@ -60,13 +88,12 @@ def test_dl_calculation_5(obtain_temp_dir): demand_file='response.csv', config_path='1-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e6/test_e6.py b/pelicun/tests/dl_calculation/e6/test_e6.py index 928dc9a91..d71997b4e 100644 --- a/pelicun/tests/dl_calculation/e6/test_e6.py +++ b/pelicun/tests/dl_calculation/e6/test_e6.py @@ -1,30 +1,59 @@ -""" -DL Calculation Example 6 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 6.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -36,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_6(obtain_temp_dir): - +def test_dl_calculation_6(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. 
@@ -60,13 +88,12 @@ def test_dl_calculation_6(obtain_temp_dir): demand_file='response.csv', config_path='1-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # diff --git a/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py index 3bd24ffb8..c92024c93 100644 --- a/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py +++ b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# +# # noqa: N999 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -44,16 +43,16 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import pandas as pd +from __future__ import annotations -from WindMetaVarRulesets import parse_BIM +import pandas as pd from BuildingClassRulesets import building_class -from FloodAssmRulesets import Assm_config from FloodClassRulesets import FL_config from WindCECBRulesets import CECB_config from WindCERBRulesets import CERB_config from WindMECBRulesets import MECB_config from WindMERBRulesets import MERB_config +from WindMetaVarRulesets import parse_BIM from WindMHRulesets import MH_config from WindMLRIRulesets import MLRI_config from WindMLRMRulesets import MLRM_config @@ -66,82 +65,85 @@ from WindWSFRulesets import WSF_config -def auto_populate(AIM): +def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: # noqa: C901 """ - Populates the DL model for hurricane assessments in Atlantic County, NJ + Populates the DL model for hurricane assessments in Atlantic County, NJ. Assumptions: - - Everything relevant to auto-population is provided in the Buiding + - Everything relevant to auto-population is provided in the Building Information Model (AIM). 
- The information expected in the AIM file is described in the parse_AIM method. Parameters ---------- - AIM_in: dictionary + aim: dictionary Contains the information that is available about the asset and will be used to auto-popualate the damage and loss model. Returns ------- GI_ap: dictionary - Containes the extended AIM data. + Contains the extended AIM data. DL_ap: dictionary Contains the auto-populated loss model. - """ + Raises + ------ + ValueError + If the building class is not recognized. + + """ # extract the General Information - GI = AIM.get('GeneralInformation', None) + gi = aim.get('GeneralInformation') # parse the GI data - GI_ap = parse_BIM(GI, location="NJ", hazards=['wind', 'inundation']) + gi_ap = parse_BIM(gi, location='NJ', hazards=['wind', 'inundation']) # identify the building class - bldg_class = building_class(GI_ap, hazard='wind') + bldg_class = building_class(gi_ap, hazard='wind') # prepare the building configuration string if bldg_class == 'WSF': - bldg_config = WSF_config(GI_ap) + bldg_config = WSF_config(gi_ap) elif bldg_class == 'WMUH': - bldg_config = WMUH_config(GI_ap) + bldg_config = WMUH_config(gi_ap) elif bldg_class == 'MSF': - bldg_config = MSF_config(GI_ap) + bldg_config = MSF_config(gi_ap) elif bldg_class == 'MMUH': - bldg_config = MMUH_config(GI_ap) + bldg_config = MMUH_config(gi_ap) elif bldg_class == 'MLRM': - bldg_config = MLRM_config(GI_ap) + bldg_config = MLRM_config(gi_ap) elif bldg_class == 'MLRI': - bldg_config = MLRI_config(GI_ap) + bldg_config = MLRI_config(gi_ap) elif bldg_class == 'MERB': - bldg_config = MERB_config(GI_ap) + bldg_config = MERB_config(gi_ap) elif bldg_class == 'MECB': - bldg_config = MECB_config(GI_ap) + bldg_config = MECB_config(gi_ap) elif bldg_class == 'CECB': - bldg_config = CECB_config(GI_ap) + bldg_config = CECB_config(gi_ap) elif bldg_class == 'CERB': - bldg_config = CERB_config(GI_ap) + bldg_config = CERB_config(gi_ap) elif bldg_class == 'SPMB': - bldg_config = SPMB_config(GI_ap) + 
bldg_config = SPMB_config(gi_ap) elif bldg_class == 'SECB': - bldg_config = SECB_config(GI_ap) + bldg_config = SECB_config(gi_ap) elif bldg_class == 'SERB': - bldg_config = SERB_config(GI_ap) + bldg_config = SERB_config(gi_ap) elif bldg_class == 'MH': - bldg_config = MH_config(GI_ap) + bldg_config = MH_config(gi_ap) else: - raise ValueError( - f"Building class {bldg_class} not recognized by the " - f"auto-population routine." + msg = ( + f'Building class {bldg_class} not recognized by the ' + f'auto-population routine.' ) + raise ValueError(msg) # prepare the flood rulesets - fld_config = FL_config(GI_ap) - - # prepare the assembly loss compositions - hu_assm, fl_assm = Assm_config(GI_ap) + fld_config = FL_config(gi_ap) # prepare the component assignment - CMP = pd.DataFrame( + comp = pd.DataFrame( { f'{bldg_config}': ['ea', 1, 1, 1, 'N/A'], f'{fld_config}': ['ea', 1, 1, 1, 'N/A'], @@ -149,28 +151,28 @@ def auto_populate(AIM): index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Hurricane", - "NumberOfStories": f"{GI_ap['NumberOfStories']}", - "OccupancyType": f"{GI_ap['OccupancyClass']}", - "PlanArea": f"{GI_ap['PlanArea']}", + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Hurricane', + 'NumberOfStories': f"{gi_ap['NumberOfStories']}", + 'OccupancyType': f"{gi_ap['OccupancyClass']}", + 'PlanArea': f"{gi_ap['PlanArea']}", }, - "Damage": {"DamageProcess": "Hazus Hurricane"}, - "Demands": {}, - "Losses": { - "BldgRepair": { - "ConsequenceDatabase": "Hazus Hurricane", - "MapApproach": "Automatic", - "DecisionVariables": { - "Cost": True, - "Carbon": False, - "Energy": False, - "Time": False, + 'Damage': {'DamageProcess': 'Hazus Hurricane'}, + 'Demands': {}, + 'Losses': { + 'BldgRepair': { + 'ConsequenceDatabase': 'Hazus Hurricane', + 'MapApproach': 'Automatic', + 'DecisionVariables': { + 'Cost': True, + 
'Carbon': False, + 'Energy': False, + 'Time': False, }, } }, } - return GI_ap, DL_ap, CMP + return gi_ap, dl_ap, comp diff --git a/pelicun/tests/dl_calculation/e7/test_e7.py b/pelicun/tests/dl_calculation/e7/test_e7.py index 8babff899..a246ada1b 100644 --- a/pelicun/tests/dl_calculation/e7/test_e7.py +++ b/pelicun/tests/dl_calculation/e7/test_e7.py @@ -1,33 +1,60 @@ -""" -DL Calculation Example 7 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 7.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil -from glob import glob +import tempfile from pathlib import Path +from typing import Generator + import pytest # import pandas as pd -from pelicun.warnings import PelicunWarning +from pelicun.pelicun_warnings import PelicunWarning from pelicun.tools.DL_calculation import run_pelicun -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name - - @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -39,8 +66,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_7(obtain_temp_dir): - +def test_dl_calculation_7(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. @@ -49,8 +75,10 @@ def test_dl_calculation_7(obtain_temp_dir): # time. 
ruleset_files = [ - Path(x).resolve() - for x in glob('pelicun/tests/dl_calculation/rulesets/*Rulesets.py') + path.resolve() + for path in Path('pelicun/tests/dl_calculation/rulesets').glob( + '*Rulesets.py' + ) ] os.chdir(this_dir) @@ -72,19 +100,18 @@ def test_dl_calculation_7(obtain_temp_dir): demand_file='response.csv', config_path='1-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='auto_HU_NJ.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # now remove the ruleset files and auto script for file_path in ruleset_files: - os.remove(f'{temp_dir}/{file_path.name}') - os.remove('auto_HU_NJ.py') + Path(f'{temp_dir}/{file_path.name}').unlink() + Path('auto_HU_NJ.py').unlink() # # Test files diff --git a/pelicun/tests/dl_calculation/e8/auto_HU_LA.py b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py index d1dd3d048..e8c5d575f 100644 --- a/pelicun/tests/dl_calculation/e8/auto_HU_LA.py +++ b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# +# # noqa: N999 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -33,7 +32,7 @@ # # You should have received a copy of the BSD 3-Clause License along with # this file. If not, see . 
-# + # Contributors: # Adam Zsarnóczay # Kuanshi Zhong @@ -44,20 +43,24 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import pandas as pd -from MetaVarRulesets import parse_BIM +from __future__ import annotations + +import contextlib + +import pandas as pd from BldgClassRulesets import building_class -from WindWSFRulesets import WSF_config +from MetaVarRulesets import parse_BIM from WindWMUHRulesets import WMUH_config +from WindWSFRulesets import WSF_config -def auto_populate(AIM): +def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: """ - Populates the DL model for hurricane assessments in Atlantic County, NJ + Populates the DL model for hurricane assessments in Atlantic County, NJ. Assumptions: - - Everything relevant to auto-population is provided in the Buiding + - Everything relevant to auto-population is provided in the Building Information Model (AIM). - The information expected in the AIM file is described in the parse_GI method. @@ -71,74 +74,78 @@ def auto_populate(AIM): Returns ------- GI_ap: dictionary - Containes the extended BIM data. + Contains the extended BIM data. DL_ap: dictionary Contains the auto-populated loss model. - """ + Raises + ------ + ValueError + If the building class is not recognized. 
+ + """ # extract the General Information - GI = AIM.get('GeneralInformation', None) + gi = aim.get('GeneralInformation') # parse the GI data - GI_ap = parse_BIM( - GI, - location="LA", + gi_ap = parse_BIM( + gi, + location='LA', hazards=[ 'wind', ], ) # identify the building class - bldg_class = building_class(GI_ap, hazard='wind') - GI_ap.update({'HazusClassW': bldg_class}) + bldg_class = building_class(gi_ap, hazard='wind') + gi_ap.update({'HazusClassW': bldg_class}) # prepare the building configuration string if bldg_class == 'WSF': - bldg_config = WSF_config(GI_ap) + bldg_config = WSF_config(gi_ap) elif bldg_class == 'WMUH': - bldg_config = WMUH_config(GI_ap) + bldg_config = WMUH_config(gi_ap) else: - raise ValueError( - f"Building class {bldg_class} not recognized by the " - f"auto-population routine." + msg = ( + f'Building class {bldg_class} not recognized by the ' + f'auto-population routine.' ) + raise ValueError(msg) # drop keys of internal variables from GI_ap dict internal_vars = ['V_ult', 'V_asd'] for var in internal_vars: - try: - GI_ap.pop(var) - except KeyError: - pass + with contextlib.suppress(KeyError): + gi_ap.pop(var) # prepare the component assignment - CMP = pd.DataFrame( + comp = pd.DataFrame( {f'{bldg_config}': ['ea', 1, 1, 1, 'N/A']}, index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "Hazus Hurricane", - "NumberOfStories": f"{GI_ap['NumberOfStories']}", - "OccupancyType": f"{GI_ap['OccupancyClass']}", - "PlanArea": f"{GI_ap['PlanArea']}", + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'Hazus Hurricane', + 'NumberOfStories': f"{gi_ap['NumberOfStories']}", + 'OccupancyType': f"{gi_ap['OccupancyClass']}", + 'PlanArea': f"{gi_ap['PlanArea']}", }, - "Damage": {"DamageProcess": "Hazus Hurricane"}, - "Demands": {}, - "Losses": { - "BldgRepair": { - "ConsequenceDatabase": "Hazus Hurricane", - 
"MapApproach": "Automatic", - "DecisionVariables": { - "Cost": True, - "Carbon": False, - "Energy": False, - "Time": False, + 'Damage': {'DamageProcess': 'Hazus Hurricane'}, + 'Demands': {}, + 'Losses': { + 'BldgRepair': { + 'ConsequenceDatabase': 'Hazus Hurricane', + 'MapApproach': 'Automatic', + 'DecisionVariables': { + 'Cost': True, + 'Carbon': False, + 'Energy': False, + 'Time': False, }, } }, } - return GI_ap, DL_ap, CMP + return gi_ap, dl_ap, comp diff --git a/pelicun/tests/dl_calculation/e8/test_e8.py b/pelicun/tests/dl_calculation/e8/test_e8.py index 35421a3ca..f098718ef 100644 --- a/pelicun/tests/dl_calculation/e8/test_e8.py +++ b/pelicun/tests/dl_calculation/e8/test_e8.py @@ -1,31 +1,59 @@ -""" -DL Calculation Example 8 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 8.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil -from glob import glob +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -37,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_8(obtain_temp_dir): - +def test_dl_calculation_8(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. @@ -47,8 +74,10 @@ def test_dl_calculation_8(obtain_temp_dir): # time. 
ruleset_files = [ - Path(x).resolve() - for x in glob('pelicun/tests/dl_calculation/rulesets/*Rulesets.py') + path.resolve() + for path in Path('pelicun/tests/dl_calculation/rulesets').glob( + '*Rulesets.py' + ) ] os.chdir(this_dir) @@ -70,19 +99,18 @@ def test_dl_calculation_8(obtain_temp_dir): demand_file='response.csv', config_path='1-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='auto_HU_LA.py', detailed_results=False, output_format=None, custom_model_dir=None, - color_warnings=False, ) # now remove the ruleset files and auto script for file_path in ruleset_files: - os.remove(f'{temp_dir}/{file_path.name}') - os.remove('auto_HU_LA.py') + Path(f'{temp_dir}/{file_path.name}').unlink() + Path('auto_HU_LA.py').unlink() # # Test files diff --git a/pelicun/tests/dl_calculation/e9/custom_pop.py b/pelicun/tests/dl_calculation/e9/custom_pop.py index 40b8c3c85..fe99b9a47 100644 --- a/pelicun/tests/dl_calculation/e9/custom_pop.py +++ b/pelicun/tests/dl_calculation/e9/custom_pop.py @@ -1,21 +1,48 @@ -# -*- coding: utf-8 -*- - -# Contributors: -# Stevan Gavrilovic -# Adam Zsarnoczay -# Example 9 Tsunami, Seaside +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . import pandas as pd -def auto_populate(AIM): +def auto_populate(aim: dict) -> tuple: """ - Populates the DL model for tsunami example using custom fragility functions + Populates the DL model for tsunami example using custom fragility functions. Assumptions ----------- * Everything relevant to auto-population is provided in the - Buiding Information Model (AIM). + Building Information Model (AIM). * The information expected in the AIM file is described in the parse_AIM method. @@ -31,22 +58,21 @@ def auto_populate(AIM): Contains the extended AIM data. DL_ap: dictionary Contains the auto-populated loss model. 
- """ + """ # parse the AIM data # print(AIM) # Look in the AIM.json file to see what you can access here # extract the General Information - GI = AIM.get('GeneralInformation', None) + gi = aim['GeneralInformation'] # GI_ap is the 'extended AIM data - this case no extended AIM data - GI_ap = GI.copy() + gi_ap = gi.copy() # Get the number of Stories - note the column heading needs to be exactly # 'NumberOfStories'. - nstories = GI_ap.get('NumberOfStories', None) + nstories = gi_ap.get('NumberOfStories', None) if nstories is None: - print("NumberOfStories attribute missing from AIM file.") return None, None, None # Get the fragility tag according to some building attribute; the @@ -62,39 +88,39 @@ def auto_populate(AIM): elif nstories >= 3: fragility_function_tag = 'building.3andAbove' else: - print(f"Invalid number of storeys provided: {nstories}") + print(f'Invalid number of storeys provided: {nstories}') # noqa: T201 # prepare the component assignment - CMP = pd.DataFrame( + comp = pd.DataFrame( {f'{fragility_function_tag}': ['ea', 1, 1, 1, 'N/A']}, index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T # Populate the DL_ap - DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "None", - "ComponentDatabasePath": "CustomDLDataFolder/damage_Tsunami.csv", + dl_ap = { + 'Asset': { + 'ComponentAssignmentFile': 'CMP_QNT.csv', + 'ComponentDatabase': 'None', + 'ComponentDatabasePath': 'CustomDLDataFolder/damage_Tsunami.csv', }, - "Damage": {"DamageProcess": "None"}, - "Demands": {}, - "Losses": { - "BldgRepair": { - "ConsequenceDatabase": "None", - "ConsequenceDatabasePath": ( - "CustomDLDataFolder/loss_repair_Tsunami.csv" + 'Damage': {'DamageProcess': 'None'}, + 'Demands': {}, + 'Losses': { + 'BldgRepair': { + 'ConsequenceDatabase': 'None', + 'ConsequenceDatabasePath': ( + 'CustomDLDataFolder/loss_repair_Tsunami.csv' ), - "MapApproach": "User Defined", - "MapFilePath": "CustomDLDataFolder/loss_map.csv", - 
"DecisionVariables": { - "Cost": True, - "Carbon": False, - "Energy": False, - "Time": False, + 'MapApproach': 'User Defined', + 'MapFilePath': 'CustomDLDataFolder/loss_map.csv', + 'DecisionVariables': { + 'Cost': True, + 'Carbon': False, + 'Energy': False, + 'Time': False, }, } }, } - return GI_ap, DL_ap, CMP + return gi_ap, dl_ap, comp diff --git a/pelicun/tests/dl_calculation/e9/test_e9.py b/pelicun/tests/dl_calculation/e9/test_e9.py index f2562cbbf..299e1c7a2 100644 --- a/pelicun/tests/dl_calculation/e9/test_e9.py +++ b/pelicun/tests/dl_calculation/e9/test_e9.py @@ -1,31 +1,59 @@ -""" -DL Calculation Example 9 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""DL Calculation Example 9.""" + +from __future__ import annotations -""" - -import tempfile import os import shutil -from glob import glob +import tempfile from pathlib import Path -import pytest -from pelicun.warnings import PelicunWarning -from pelicun.tools.DL_calculation import run_pelicun +from typing import Generator +import pytest -# pylint: disable=missing-function-docstring -# pylint: disable=missing-yield-doc -# pylint: disable=missing-yield-type-doc -# pylint: disable=redefined-outer-name +from pelicun.pelicun_warnings import PelicunWarning +from pelicun.tools.DL_calculation import run_pelicun @pytest.fixture -def obtain_temp_dir(): - +def obtain_temp_dir() -> Generator: # get the path of this file this_file = __file__ - initial_dir = os.getcwd() + initial_dir = Path.cwd() this_dir = str(Path(this_file).parent) temp_dir = tempfile.mkdtemp() @@ -37,8 +65,7 @@ def obtain_temp_dir(): os.chdir(initial_dir) -def test_dl_calculation_9(obtain_temp_dir): - +def test_dl_calculation_9(obtain_temp_dir: tuple[str, str]) -> None: this_dir, temp_dir = obtain_temp_dir # Copy all input files to a temporary directory. @@ -46,9 +73,12 @@ def test_dl_calculation_9(obtain_temp_dir): # This approach is more robust to changes in the output files over # time. 
ruleset_files = [ - Path(x).resolve() - for x in glob('pelicun/tests/dl_calculation/rulesets/*Rulesets.py') + path.resolve() + for path in Path('pelicun/tests/dl_calculation/rulesets').glob( + '*Rulesets.py' + ) ] + dl_models_dir = Path(f'{this_dir}/CustomDLModels').resolve() os.chdir(this_dir) temp_dir = tempfile.mkdtemp() @@ -69,19 +99,18 @@ def test_dl_calculation_9(obtain_temp_dir): demand_file='response.csv', config_path='3500-AIM.json', output_path=None, - coupled_EDP=True, - realizations='100', + coupled_edp=True, + realizations=100, auto_script_path='custom_pop.py', detailed_results=False, output_format=None, custom_model_dir='./CustomDLModels', - color_warnings=False, ) # now remove the ruleset files and auto script for file_path in ruleset_files: - os.remove(f'{temp_dir}/{file_path.name}') - os.remove('custom_pop.py') + Path(f'{temp_dir}/{file_path.name}').unlink() + Path('custom_pop.py').unlink() # # Test files diff --git a/pelicun/tests/dl_calculation/other/o1/run_o1.py b/pelicun/tests/dl_calculation/other/o1/run_o1.py index d40389a57..1dafb3c7f 100644 --- a/pelicun/tests/dl_calculation/other/o1/run_o1.py +++ b/pelicun/tests/dl_calculation/other/o1/run_o1.py @@ -1,8 +1,41 @@ -""" -Runs pelicun assessments using DL_calculation.py via subprocess. -""" +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""Runs pelicun assessments using DL_calculation.py via subprocess.""" import sys + from pelicun.tools.DL_calculation import main sys.argv = ['pelicun', '-c', 'config_file.json', '--dirnameOutput', 'output'] diff --git a/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py index 70c6395d8..fd1f53bdf 100644 --- a/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,9 +43,9 @@ # Tracy Kijewski-Correa -def building_class(BIM, hazard): +def building_class(bim: dict, hazard: str) -> str: # noqa: C901 """ - Short description + Short description. 
Long description @@ -61,27 +60,27 @@ def building_class(BIM, hazard): ------- bldg_class: str One of the standard building class labels from HAZUS - """ + """ # check hazard - if hazard not in ['wind', 'inundation']: - print(f'WARNING: The provided hazard is not recognized: {hazard}') + if hazard not in {'wind', 'inundation'}: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 if hazard == 'wind': - if BIM['BuildingType'] == "Wood": - if (BIM['OccupancyClass'] == 'RES1') or ( - (BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == '') + if bim['BuildingType'] == 'Wood': + if (bim['OccupancyClass'] == 'RES1') or ( + (bim['RoofShape'] != 'flt') and (bim['OccupancyClass'] == '') # noqa: PLC1901 ): # OccupancyClass = RES1 # Wood Single-Family Homes (WSF1 or WSF2) # OR roof type = flat (HAZUS can only map flat to WSF1) # OR default (by '') if ( - BIM['RoofShape'] == 'flt' + bim['RoofShape'] == 'flt' ): # checking if there is a misclassication - BIM['RoofShape'] = ( + bim['RoofShape'] = ( # ensure the WSF has gab (by default, note gab - # is more vulneable than hip) + # is more vulnerable than hip) 'gab' ) bldg_class = 'WSF' @@ -90,16 +89,16 @@ def building_class(BIM, hazard): # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3) bldg_class = 'WMUH' - elif BIM['BuildingType'] == "Steel": - if (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] + elif bim['BuildingType'] == 'Steel': + if (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} ): # Steel Engineered Residential Building (SERBL, SERBM, SERBH) bldg_class = 'SERB' - elif (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -110,23 +109,23 @@ def building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] + } ): # Steel Engineered Commercial 
Building (SECBL, SECBM, SECBH) bldg_class = 'SECB' - elif (BIM['DesignLevel'] == 'PE') and ( - BIM['OccupancyClass'] - not in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] + elif (bim['DesignLevel'] == 'PE') and ( + bim['OccupancyClass'] + not in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} ): # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL) bldg_class = 'SPMB' else: bldg_class = 'SECB' - elif BIM['BuildingType'] == "Concrete": - if (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif bim['BuildingType'] == 'Concrete': + if (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'RES3A', 'RES3B', 'RES3C', @@ -135,13 +134,13 @@ def building_class(BIM, hazard): 'RES3F', 'RES5', 'RES6', - ] + } ): # Concrete Engineered Residential Building (CERBL, CERBM, CERBH) bldg_class = 'CERB' - elif (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -152,27 +151,27 @@ def building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] + } ): # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH) bldg_class = 'CECB' else: bldg_class = 'CECB' - elif BIM['BuildingType'] == "Masonry": - if BIM['OccupancyClass'] == 'RES1': + elif bim['BuildingType'] == 'Masonry': + if bim['OccupancyClass'] == 'RES1': # OccupancyClass = RES1 # Masonry Single-Family Homes (MSF1 or MSF2) bldg_class = 'MSF' elif ( - BIM['OccupancyClass'] - in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] - ) and (BIM['DesignLevel'] == 'E'): + bim['OccupancyClass'] + in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} + ) and (bim['DesignLevel'] == 'E'): # Masonry Engineered Residential Building (MERBL, MERBM, MERBH) bldg_class = 'MERB' elif ( - BIM['OccupancyClass'] - in [ + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -183,21 +182,21 @@ def building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] - ) and (BIM['DesignLevel'] 
== 'E'): + } + ) and (bim['DesignLevel'] == 'E'): # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH) bldg_class = 'MECB' - elif BIM['OccupancyClass'] in [ + elif bim['OccupancyClass'] in { 'IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6', - ]: + }: # Masonry Low-Rise Masonry Warehouse/Factory (MLRI) bldg_class = 'MLRI' - elif BIM['OccupancyClass'] in [ + elif bim['OccupancyClass'] in { 'RES3A', 'RES3B', 'RES3C', @@ -207,12 +206,12 @@ def building_class(BIM, hazard): 'RES5', 'RES6', 'COM8', - ]: + }: # OccupancyClass = RES3X or COM8 # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3) bldg_class = 'MMUH' - elif (BIM['NumberOfStories'] == 1) and ( - BIM['OccupancyClass'] in ['COM1', 'COM2'] + elif (bim['NumberOfStories'] == 1) and ( + bim['OccupancyClass'] in {'COM1', 'COM2'} ): # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2) bldg_class = 'MLRM' @@ -236,7 +235,7 @@ def building_class(BIM, hazard): else: bldg_class = 'MECB' # for others not covered by the above - elif BIM['BuildingType'] == "Manufactured": + elif bim['BuildingType'] == 'Manufactured': bldg_class = 'MH' else: diff --git a/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py index 40312a8b1..e3a593a50 100644 --- a/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,9 +43,9 @@ # Tracy Kijewski-Correa -def building_class(BIM, hazard): +def building_class(bim: dict, hazard: str) -> str: # noqa: C901 """ - Short description + Short description. 
Long description @@ -59,17 +58,16 @@ def building_class(BIM, hazard): ------- bldg_class: str One of the standard building class labels from HAZUS - """ + """ # check hazard - if hazard not in ['wind', 'inundation']: - print(f'WARNING: The provided hazard is not recognized: {hazard}') + if hazard not in {'wind', 'inundation'}: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 if hazard == 'wind': - - if BIM['BuildingType'] == 'Wood': - if (BIM['OccupancyClass'] == 'RES1') or ( - (BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == '') + if bim['BuildingType'] == 'Wood': + if (bim['OccupancyClass'] == 'RES1') or ( + (bim['RoofShape'] != 'flt') and (bim['OccupancyClass'] == '') # noqa: PLC1901 ): # BuildingType = 3001 # OccupancyClass = RES1 @@ -77,11 +75,11 @@ def building_class(BIM, hazard): # OR roof type = flat (HAZUS can only map flat to WSF1) # OR default (by '') if ( - BIM['RoofShape'] == 'flt' + bim['RoofShape'] == 'flt' ): # checking if there is a misclassication - BIM['RoofShape'] = ( + bim['RoofShape'] = ( # ensure the WSF has gab (by default, note gab - # is more vulneable than hip) + # is more vulnerable than hip) 'gab' ) bldg_class = 'WSF' @@ -90,17 +88,17 @@ def building_class(BIM, hazard): # OccupancyClass = RES3, RES5, RES6, or COM8 # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3) bldg_class = 'WMUH' - elif BIM['BuildingType'] == 'Steel': - if (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] + elif bim['BuildingType'] == 'Steel': + if (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} ): # BuildingType = 3002 # Steel Engineered Residential Building (SERBL, SERBM, SERBH) bldg_class = 'SERB' - elif (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -111,24 +109,24 @@ def 
building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] + } ): # BuildingType = 3002 # Steel Engineered Commercial Building (SECBL, SECBM, SECBH) bldg_class = 'SECB' - elif (BIM['DesignLevel'] == 'PE') and ( - BIM['OccupancyClass'] - not in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] + elif (bim['DesignLevel'] == 'PE') and ( + bim['OccupancyClass'] + not in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} ): # BuildingType = 3002 # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL) bldg_class = 'SPMB' else: bldg_class = 'SECB' - elif BIM['BuildingType'] == 'Concrete': - if (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif bim['BuildingType'] == 'Concrete': + if (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'RES3A', 'RES3B', 'RES3C', @@ -137,14 +135,14 @@ def building_class(BIM, hazard): 'RES3F', 'RES5', 'RES6', - ] + } ): # BuildingType = 3003 # Concrete Engineered Residential Building (CERBL, CERBM, CERBH) bldg_class = 'CERB' - elif (BIM['DesignLevel'] == 'E') and ( - BIM['OccupancyClass'] - in [ + elif (bim['DesignLevel'] == 'E') and ( + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -155,29 +153,29 @@ def building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] + } ): # BuildingType = 3003 # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH) bldg_class = 'CECB' else: bldg_class = 'CECB' - elif BIM['BuildingType'] == 'Masonry': - if BIM['OccupancyClass'] == 'RES1': + elif bim['BuildingType'] == 'Masonry': + if bim['OccupancyClass'] == 'RES1': # BuildingType = 3004 # OccupancyClass = RES1 # Masonry Single-Family Homes (MSF1 or MSF2) bldg_class = 'MSF' elif ( - BIM['OccupancyClass'] - in ['RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'] - ) and (BIM['DesignLevel'] == 'E'): + bim['OccupancyClass'] + in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} + ) and (bim['DesignLevel'] == 'E'): # BuildingType = 3004 # Masonry Engineered Residential Building (MERBL, MERBM, 
MERBH) bldg_class = 'MERB' elif ( - BIM['OccupancyClass'] - in [ + bim['OccupancyClass'] + in { 'COM1', 'COM2', 'COM3', @@ -188,23 +186,23 @@ def building_class(BIM, hazard): 'COM8', 'COM9', 'COM10', - ] - ) and (BIM['DesignLevel'] == 'E'): + } + ) and (bim['DesignLevel'] == 'E'): # BuildingType = 3004 # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH) bldg_class = 'MECB' - elif BIM['OccupancyClass'] in [ + elif bim['OccupancyClass'] in { 'IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6', - ]: + }: # BuildingType = 3004 # Masonry Low-Rise Masonry Warehouse/Factory (MLRI) bldg_class = 'MLRI' - elif BIM['OccupancyClass'] in [ + elif bim['OccupancyClass'] in { 'RES3A', 'RES3B', 'RES3C', @@ -214,13 +212,13 @@ def building_class(BIM, hazard): 'RES5', 'RES6', 'COM8', - ]: + }: # BuildingType = 3004 # OccupancyClass = RES3X or COM8 # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3) bldg_class = 'MMUH' - elif (BIM['NumberOfStories'] == 1) and ( - BIM['OccupancyClass'] in ['COM1', 'COM2'] + elif (bim['NumberOfStories'] == 1) and ( + bim['OccupancyClass'] in {'COM1', 'COM2'} ): # BuildingType = 3004 # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2) @@ -245,7 +243,7 @@ def building_class(BIM, hazard): # return 'MMUHNE' else: bldg_class = 'MECB' # for others not covered by the above - elif BIM['BuildingType'] == 'Manufactured': + elif bim['BuildingType'] == 'Manufactured': bldg_class = 'MH' else: diff --git a/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py index c9fecb11d..c06a6bdbe 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -45,9 +44,12 @@ # Tracy Kijewski-Correa -def Assm_config(BIM): +from __future__ import annotations + + +def 
Assm_config(bim: dict) -> tuple[str, str]: """ - Rules to identify the flood vunerability category + Rules to identify the flood vulnerability category. Parameters ---------- @@ -57,23 +59,24 @@ def Assm_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. + A string that identifies a specific configuration within this + building class. + """ - year = BIM['YearBuilt'] # just for the sake of brevity + year = bim['YearBuilt'] # just for the sake of brevity # Flood Type - if BIM['FloodZone'] in ['AO']: + if bim['FloodZone'] == 'AO': flood_type = 'raz' # Riverline/A-Zone - elif BIM['FloodZone'] in ['AE', 'AH', 'A']: + elif bim['FloodZone'] in {'AE', 'AH', 'A'}: flood_type = 'caz' # Costal/A-Zone - elif BIM['FloodZone'] in ['VE']: + elif bim['FloodZone'] == 'VE': flood_type = 'cvz' # Costal/V-Zone else: flood_type = 'caz' # Default # PostFIRM - PostFIRM = False # Default + post_firm = False # Default city_list = [ 'Absecon', 'Atlantic', @@ -124,20 +127,20 @@ def Assm_config(BIM): 1971, 1979, ] - for i in range(0, 22): - PostFIRM = ( - (BIM['City'] == city_list[i]) and (year > year_list[i]) - ) or PostFIRM + for i in range(22): + post_firm = ( + (bim['City'] == city_list[i]) and (year > year_list[i]) + ) or post_firm # fl_assm fl_assm = ( f"{'fl_surge_assm'}_" - f"{BIM['OccupancyClass']}_" - f"{int(PostFIRM)}_" + f"{bim['OccupancyClass']}_" + f"{int(post_firm)}_" f"{flood_type}" ) # hu_assm - hu_assm = f"{'hu_surge_assm'}_" f"{BIM['OccupancyClass']}_" f"{int(PostFIRM)}" + hu_assm = f"{'hu_surge_assm'}_{bim['OccupancyClass']}_{int(post_firm)}" return hu_assm, fl_assm diff --git a/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py index 035b64307..954235e2a 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # 
# Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import numpy as np -def FL_config(BIM): +def FL_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify the flood vunerability category + Rules to identify the flood vulnerability category. Parameters ---------- @@ -58,17 +57,16 @@ def FL_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. + A string that identifies a specific configuration within this + building class. + """ - year = BIM['YearBuilt'] # just for the sake of brevity + year = bim['YearBuilt'] # just for the sake of brevity # Flood Type - if BIM['FloodZone'] == 'AO': + if bim['FloodZone'] == 'AO': flood_type = 'raz' # Riverline/A-Zone - elif BIM['FloodZone'] in ['A', 'AE']: - flood_type = 'cvz' # Costal-Zone - elif BIM['FloodZone'].startswith('V'): + elif bim['FloodZone'] in {'A', 'AE'} or bim['FloodZone'].startswith('V'): flood_type = 'cvz' # Costal-Zone else: flood_type = 'cvz' # Default @@ -81,7 +79,7 @@ def FL_config(BIM): # FFE = BIM['FirstFloorElevation'] - 1.0 # PostFIRM - PostFIRM = False # Default + post_firm = False # Default city_list = [ 'Absecon', 'Atlantic', @@ -132,115 +130,53 @@ def FL_config(BIM): 1971, 1979, ] - for i in range(0, 22): - PostFIRM = ( - (BIM['City'] == city_list[i]) and (year > year_list[i]) - ) or PostFIRM + for i in range(22): + post_firm = ( + (bim['City'] == city_list[i]) and (year > year_list[i]) + ) or post_firm # Basement Type - if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504): + if bim['SplitLevel'] and (bim['FoundationType'] == 3504): bmt_type = 'spt' # Split-Level Basement - elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]: + elif bim['FoundationType'] in {3501, 3502, 3503, 3505, 3506, 3507}: bmt_type = 'bn' # No Basement - elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504): + elif (not bim['SplitLevel']) and 
(bim['FoundationType'] == 3504): bmt_type = 'bw' # Basement else: bmt_type = 'bw' # Default - # flake8 - unused variable: `dur`. - # # Duration - # dur = 'short' - - # flake8: unused variable: `OT` - # # Occupancy Type - # if BIM['OccupancyClass'] == 'RES1': - # if BIM['NumberOfStories'] == 1: - # if flood_type == 'raz': - # OT = 'SF1XA' - # elif flood_type == 'cvz': - # OT = 'SF1XV' - # else: - # if bmt_type == 'nav': - # if flood_type == 'raz': - # OT = 'SF2XA' - # elif flood_type == 'cvz': - # OT = 'SF2XV' - # elif bmt_type == 'bmt': - # if flood_type == 'raz': - # OT = 'SF2BA' - # elif flood_type == 'cvz': - # OT = 'SF2BV' - # elif bmt_type == 'spt': - # if flood_type == 'raz': - # OT = 'SF2SA' - # elif flood_type == 'cvz': - # OT = 'SF2SV' - # elif 'RES3' in BIM['OccupancyClass']: - # OT = 'APT' - # else: - # ap_OT = { - # 'RES2': 'MH', - # 'RES4': 'HOT', - # 'RES5': 'NURSE', - # 'RES6': 'NURSE', - # 'COM1': 'RETAL', - # 'COM2': 'WHOLE', - # 'COM3': 'SERVICE', - # 'COM4': 'OFFICE', - # 'COM5': 'BANK', - # 'COM6': 'HOSP', - # 'COM7': 'MED', - # 'COM8': 'REC', - # 'COM9': 'THEAT', - # 'COM10': 'GARAGE', - # 'IND1': 'INDH', - # 'IND2': 'INDL', - # 'IND3': 'CHEM', - # 'IND4': 'PROC', - # 'IND5': 'CHEM', - # 'IND6': 'CONST', - # 'AGR1': 'AGRI', - # 'REL1': 'RELIG', - # 'GOV1': 'CITY', - # 'GOV2': 'EMERG', - # 'EDU1': 'SCHOOL', - # 'EDU2': 'SCHOOL', - # } - # ap_OT[BIM['OccupancyClass']] - - if BIM['OccupancyClass'] not in ['RES1', 'RES2']: - if 'RES3' in BIM['OccupancyClass']: + if bim['OccupancyClass'] not in {'RES1', 'RES2'}: + if 'RES3' in bim['OccupancyClass']: fl_config = f"{'fl'}_" f"{'RES3'}" else: - fl_config = f"{'fl'}_" f"{BIM['OccupancyClass']}" - elif BIM['OccupancyClass'] == 'RES2': - fl_config = f"{'fl'}_" f"{BIM['OccupancyClass']}_" f"{flood_type}" + fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}" + elif bim['OccupancyClass'] == 'RES2': + fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}_" f"{flood_type}" + elif bmt_type == 'spt': + fl_config = ( + 
f"{'fl'}_" + f"{bim['OccupancyClass']}_" + f"{'sl'}_" + f"{'bw'}_" + f"{flood_type}" + ) else: - if bmt_type == 'spt': - fl_config = ( - f"{'fl'}_" - f"{BIM['OccupancyClass']}_" - f"{'sl'}_" - f"{'bw'}_" - f"{flood_type}" - ) - else: - st = 's' + str(np.min([BIM['NumberOfStories'], 3])) - fl_config = ( - f"{'fl'}_" - f"{BIM['OccupancyClass']}_" - f"{st}_" - f"{bmt_type}_" - f"{flood_type}" - ) + st = 's' + str(np.min([bim['NumberOfStories'], 3])) + fl_config = ( + f"{'fl'}_" + f"{bim['OccupancyClass']}_" + f"{st}_" + f"{bmt_type}_" + f"{flood_type}" + ) # extend the BIM dictionary - BIM.update( - dict( - FloodType=flood_type, - BasementType=bmt_type, - PostFIRM=PostFIRM, - ) + bim.update( + { + 'FloodType': flood_type, + 'BasementType': bmt_type, + 'PostFIRM': post_firm, + } ) return fl_config diff --git a/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py index f32939a11..7d8864b8f 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import numpy as np -def FL_config(BIM): +def FL_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify the flood vunerability category + Rules to identify the flood vulnerability category. Parameters ---------- @@ -58,17 +57,16 @@ def FL_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. + A string that identifies a specific configuration within this + building class. 
+ """ - year = BIM['YearBuilt'] # just for the sake of brevity + year = bim['YearBuilt'] # just for the sake of brevity # Flood Type - if BIM['FloodZone'] == 'AO': + if bim['FloodZone'] == 'AO': flood_type = 'raz' # Riverline/A-Zone - elif BIM['FloodZone'] in ['A', 'AE']: - flood_type = 'cvz' # Costal-Zone - elif BIM['FloodZone'].startswith('V'): + elif bim['FloodZone'] in {'A', 'AE'} or bim['FloodZone'].startswith('V'): flood_type = 'cvz' # Costal-Zone else: flood_type = 'cvz' # Default @@ -81,7 +79,7 @@ def FL_config(BIM): # FFE = BIM['FirstFloorElevation'] - 1.0 # PostFIRM - PostFIRM = False # Default + post_firm = False # Default city_list = [ 'Absecon', 'Atlantic', @@ -132,115 +130,53 @@ def FL_config(BIM): 1971, 1979, ] - for i in range(0, 22): - PostFIRM = ( - (BIM['City'] == city_list[i]) and (year > year_list[i]) - ) or PostFIRM + for i in range(22): + post_firm = ( + (bim['City'] == city_list[i]) and (year > year_list[i]) + ) or post_firm # Basement Type - if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504): + if bim['SplitLevel'] and (bim['FoundationType'] == 3504): bmt_type = 'spt' # Split-Level Basement - elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]: + elif bim['FoundationType'] in {3501, 3502, 3503, 3505, 3506, 3507}: bmt_type = 'bn' # No Basement - elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504): + elif (not bim['SplitLevel']) and (bim['FoundationType'] == 3504): bmt_type = 'bw' # Basement else: bmt_type = 'bw' # Default - # flake8 - unused variable: `dur`. - # # Duration - # dur = 'short' - - # flake8 - unused variable: `OT`. 
- # # Occupancy Type - # if BIM['OccupancyClass'] == 'RES1': - # if BIM['NumberOfStories'] == 1: - # if flood_type == 'raz': - # OT = 'SF1XA' - # elif flood_type == 'cvz': - # OT = 'SF1XV' - # else: - # if bmt_type == 'nav': - # if flood_type == 'raz': - # OT = 'SF2XA' - # elif flood_type == 'cvz': - # OT = 'SF2XV' - # elif bmt_type == 'bmt': - # if flood_type == 'raz': - # OT = 'SF2BA' - # elif flood_type == 'cvz': - # OT = 'SF2BV' - # elif bmt_type == 'spt': - # if flood_type == 'raz': - # OT = 'SF2SA' - # elif flood_type == 'cvz': - # OT = 'SF2SV' - # elif 'RES3' in BIM['OccupancyClass']: - # OT = 'APT' - # else: - # ap_OT = { - # 'RES2': 'MH', - # 'RES4': 'HOT', - # 'RES5': 'NURSE', - # 'RES6': 'NURSE', - # 'COM1': 'RETAL', - # 'COM2': 'WHOLE', - # 'COM3': 'SERVICE', - # 'COM4': 'OFFICE', - # 'COM5': 'BANK', - # 'COM6': 'HOSP', - # 'COM7': 'MED', - # 'COM8': 'REC', - # 'COM9': 'THEAT', - # 'COM10': 'GARAGE', - # 'IND1': 'INDH', - # 'IND2': 'INDL', - # 'IND3': 'CHEM', - # 'IND4': 'PROC', - # 'IND5': 'CHEM', - # 'IND6': 'CONST', - # 'AGR1': 'AGRI', - # 'REL1': 'RELIG', - # 'GOV1': 'CITY', - # 'GOV2': 'EMERG', - # 'EDU1': 'SCHOOL', - # 'EDU2': 'SCHOOL', - # } - # ap_OT[BIM['OccupancyClass']] - - if BIM['OccupancyClass'] not in ['RES1', 'RES2']: - if 'RES3' in BIM['OccupancyClass']: + if bim['OccupancyClass'] not in {'RES1', 'RES2'}: + if 'RES3' in bim['OccupancyClass']: fl_config = f"{'fl'}_" f"{'RES3'}" else: - fl_config = f"{'fl'}_" f"{BIM['OccupancyClass']}" - elif BIM['OccupancyClass'] == 'RES2': - fl_config = f"{'fl'}_" f"{BIM['OccupancyClass']}_" f"{flood_type}" + fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}" + elif bim['OccupancyClass'] == 'RES2': + fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}_" f"{flood_type}" + elif bmt_type == 'spt': + fl_config = ( + f"{'fl'}_" + f"{bim['OccupancyClass']}_" + f"{'sl'}_" + f"{'bw'}_" + f"{flood_type}" + ) else: - if bmt_type == 'spt': - fl_config = ( - f"{'fl'}_" - f"{BIM['OccupancyClass']}_" - f"{'sl'}_" - 
f"{'bw'}_" - f"{flood_type}" - ) - else: - st = 's' + str(np.min([BIM['NumberOfStories'], 3])) - fl_config = ( - f"{'fl'}_" - f"{BIM['OccupancyClass']}_" - f"{st}_" - f"{bmt_type}_" - f"{flood_type}" - ) + st = 's' + str(np.min([bim['NumberOfStories'], 3])) + fl_config = ( + f"{'fl'}_" + f"{bim['OccupancyClass']}_" + f"{st}_" + f"{bmt_type}_" + f"{flood_type}" + ) # extend the BIM dictionary - BIM.update( - dict( - FloodType=flood_type, - BasementType=bmt_type, - PostFIRM=PostFIRM, - ) + bim.update( + { + 'FloodType': flood_type, + 'BasementType': bmt_type, + 'PostFIRM': post_firm, + } ) return fl_config diff --git a/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py index cd262c58b..cc6155740 100644 --- a/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,14 +42,16 @@ # Meredith Lockhead # Tracy Kijewski-Correa +from __future__ import annotations + import numpy as np -def parse_BIM(BIM_in, location, hazards): +def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: C901 """ Parses the information provided in the BIM model. - The atrributes below list the expected metadata in the BIM file + The attributes below list the expected metadata in the BIM file Parameters ---------- @@ -84,23 +85,28 @@ def parse_BIM(BIM_in, location, hazards): ------- BIM: dictionary Parsed building characteristics. - """ + Raises + ------ + KeyError + In case of missing attributes. 
+ + """ # check location - if location not in ['LA', 'NJ']: - print(f'WARNING: The provided location is not recognized: {location}') + if location not in {'LA', 'NJ'}: + print(f'WARNING: The provided location is not recognized: {location}') # noqa: T201 # check hazard for hazard in hazards: - if hazard not in ['wind', 'inundation']: - print(f'WARNING: The provided hazard is not recognized: {hazard}') + if hazard not in {'wind', 'inundation'}: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 # initialize the BIM dict - BIM = {} + bim = {} if 'wind' in hazards: # maps roof type to the internal representation - ap_RoofType = { + ap_roof_type = { 'hip': 'hip', 'hipped': 'hip', 'Hip': 'hip', @@ -112,8 +118,8 @@ def parse_BIM(BIM_in, location, hazards): } # maps roof system to the internal representation - ap_RoofSystem = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} - roof_system = BIM_in.get('RoofSystem', 'Wood') + ap_roof_system = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} + roof_system = bim_in.get('RoofSystem', 'Wood') # flake8 - unused variable: `ap_NoUnits`. 
# # maps number of units to the internal representation @@ -130,13 +136,13 @@ def parse_BIM(BIM_in, location, hazards): # Year built alname_yearbuilt = ['yearBuilt', 'YearBuiltMODIV', 'YearBuiltNJDEP'] - yearbuilt = BIM_in.get('YearBuilt', None) + yearbuilt = bim_in.get('YearBuilt') # if none of the above works, set a default if yearbuilt is None: for alname in alname_yearbuilt: - if alname in BIM_in.keys(): - yearbuilt = BIM_in[alname] + if alname in bim_in: + yearbuilt = bim_in[alname] break if yearbuilt is None: @@ -150,65 +156,69 @@ def parse_BIM(BIM_in, location, hazards): 'NumberofStories1', ] - nstories = BIM_in.get('NumberOfStories', None) + nstories = bim_in.get('NumberOfStories') if nstories is None: for alname in alname_nstories: - if alname in BIM_in.keys(): - nstories = BIM_in[alname] + if alname in bim_in: + nstories = bim_in[alname] break if nstories is None: - raise KeyError("NumberOfStories attribute missing, cannot autopopulate") + msg = 'NumberOfStories attribute missing, cannot autopopulate' + raise KeyError(msg) # Plan Area alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0'] - area = BIM_in.get('PlanArea', None) + area = bim_in.get('PlanArea') if area is None: for alname in alname_area: - if alname in BIM_in.keys(): - area = BIM_in[alname] + if alname in bim_in: + area = bim_in[alname] break if area is None: - raise KeyError("PlanArea attribute missing, cannot autopopulate") + msg = 'PlanArea attribute missing, cannot autopopulate' + raise KeyError(msg) # Design Wind Speed alname_dws = ['DWSII', 'DesignWindSpeed'] - dws = BIM_in.get('DesignWindSpeed', None) + dws = bim_in.get('DesignWindSpeed') if dws is None: for alname in alname_dws: - if alname in BIM_in.keys(): - dws = BIM_in[alname] + if alname in bim_in: + dws = bim_in[alname] break if dws is None: - raise KeyError("DesignWindSpeed attribute missing, cannot autopopulate") + msg = 'DesignWindSpeed attribute missing, cannot autopopulate' + raise KeyError(msg) # occupancy type 
alname_occupancy = ['occupancy', 'OccupancyClass'] - oc = BIM_in.get('OccupancyClass', None) + oc = bim_in.get('OccupancyClass') if oc is None: for alname in alname_occupancy: - if alname in BIM_in.keys(): - oc = BIM_in[alname] + if alname in bim_in: + oc = bim_in[alname] break if oc is None: - raise KeyError("OccupancyClass attribute missing, cannot autopopulate") + msg = 'OccupancyClass attribute missing, cannot autopopulate' + raise KeyError(msg) # if getting RES3 then converting it to default RES3A if oc == 'RES3': oc = 'RES3A' # maps for BuildingType - ap_BuildingType_NJ = { + ap_building_type_nj = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 3001: 'Wood', @@ -219,57 +229,56 @@ def parse_BIM(BIM_in, location, hazards): } if location == 'NJ': # NJDEP code for flood zone needs to be converted - buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']] + buildingtype = ap_building_type_nj[bim_in['BuildingType']] elif location == 'LA': # standard input should provide the building type as a string - buildingtype = BIM_in['BuildingType'] + buildingtype = bim_in['BuildingType'] # maps for design level (Marginal Engineered is mapped to # Engineered as defauplt) - ap_DesignLevel = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} - design_level = BIM_in.get('DesignLevel', 'E') + ap_design_level = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} + design_level = bim_in.get('DesignLevel', 'E') # flood zone - flood_zone = BIM_in.get('FloodZone', 'X') + flood_zone = bim_in.get('FloodZone', 'X') # add the parsed data to the BIM dict - BIM.update( - dict( - OccupancyClass=str(oc), - BuildingType=buildingtype, - YearBuilt=int(yearbuilt), - NumberOfStories=int(nstories), - PlanArea=float(area), - V_ult=float(dws), - AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp', 'Below')], - RoofShape=ap_RoofType[BIM_in['RoofShape']], - RoofSlope=float(BIM_in.get('RoofSlope', 0.25)), # default 0.25 - SheathingThickness=float( - 
BIM_in.get('SheathingThick', 1.0) + bim.update( + { + 'OccupancyClass': str(oc), + 'BuildingType': buildingtype, + 'YearBuilt': int(yearbuilt), + 'NumberOfStories': int(nstories), + 'PlanArea': float(area), + 'V_ult': float(dws), + 'AvgJanTemp': ap_ajt[bim_in.get('AvgJanTemp', 'Below')], + 'RoofShape': ap_roof_type[bim_in['RoofShape']], + 'RoofSlope': float(bim_in.get('RoofSlope', 0.25)), # default 0.25 + 'SheathingThickness': float( + bim_in.get('SheathingThick', 1.0) ), # default 1.0 - RoofSystem=str( - ap_RoofSystem[roof_system] + 'RoofSystem': str( + ap_roof_system[roof_system] ), # only valid for masonry structures - Garage=float(BIM_in.get('Garage', -1.0)), - LULC=BIM_in.get('LULC', -1), - MeanRoofHt=float(BIM_in.get('MeanRoofHt', 15.0)), # default 15 - WindowArea=float(BIM_in.get('WindowArea', 0.20)), - WindZone=str(BIM_in.get('WindZone', 'I')), - FloodZone=str(flood_zone), - ) + 'Garage': float(bim_in.get('Garage', -1.0)), + 'LULC': bim_in.get('LULC', -1), + 'MeanRoofHt': float(bim_in.get('MeanRoofHt', 15.0)), # default 15 + 'WindowArea': float(bim_in.get('WindowArea', 0.20)), + 'WindZone': str(bim_in.get('WindZone', 'I')), + 'FloodZone': str(flood_zone), + } ) if 'inundation' in hazards: - # maps for split level - ap_SplitLevel = {'NO': 0, 'YES': 1} + ap_split_level = {'NO': 0, 'YES': 1} # foundation type - foundation = BIM_in.get('FoundationType', 3501) + foundation = bim_in.get('FoundationType', 3501) # number of units - nunits = BIM_in.get('NoUnits', 1) + nunits = bim_in.get('NoUnits', 1) # flake8 - unused variable: `ap_FloodZone`. 
# # maps for flood zone @@ -303,33 +312,34 @@ def parse_BIM(BIM_in, location, hazards): # floodzone_fema = BIM_in['FloodZone'] # add the parsed data to the BIM dict - BIM.update( - dict( - DesignLevel=str(ap_DesignLevel[design_level]), # default engineered - NumberOfUnits=int(nunits), - FirstFloorElevation=float(BIM_in.get('FirstFloorHt1', 10.0)), - SplitLevel=bool( - ap_SplitLevel[BIM_in.get('SplitLevel', 'NO')] + bim.update( + { + 'DesignLevel': str( + ap_design_level[design_level] + ), # default engineered + 'NumberOfUnits': int(nunits), + 'FirstFloorElevation': float(bim_in.get('FirstFloorHt1', 10.0)), + 'SplitLevel': bool( + ap_split_level[bim_in.get('SplitLevel', 'NO')] ), # dfault: no - FoundationType=int(foundation), # default: pile - City=BIM_in.get('City', 'NA'), - ) + 'FoundationType': int(foundation), # default: pile + 'City': bim_in.get('City', 'NA'), + } ) # add inferred, generic meta-variables if 'wind' in hazards: - # Hurricane-Prone Region (HRP) # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is # greater than a pre-defined limit. - if BIM['YearBuilt'] >= 2016: + if bim['YearBuilt'] >= 2016: # The limit is 115 mph in IRC 2015 - HPR = BIM['V_ult'] > 115.0 + hpr = bim['V_ult'] > 115.0 else: # The limit is 90 mph in IRC 2009 and earlier versions - HPR = BIM['V_ult'] > 90.0 + hpr = bim['V_ult'] > 90.0 # Wind Borne Debris # Areas within hurricane-prone regions are affected by debris if one of @@ -339,7 +349,7 @@ def parse_BIM(BIM_in, location, hazards): # (2) In areas where the ultimate design wind speed is greater than # general_lim # The flood_lim and general_lim limits depend on the year of construction - if BIM['YearBuilt'] >= 2016: + if bim['YearBuilt'] >= 2016: # In IRC 2015: flood_lim = 130.0 # mph general_lim = 140.0 # mph @@ -353,16 +363,16 @@ def parse_BIM(BIM_in, location, hazards): # where the ultimate design wind speed is 130 mph (58m/s) or greater. 
# (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s) # or greater. (Definitions: Chapter 2, 2015 NJ Residential Code) - if not HPR: - WBD = False + if not hpr: + wbd = False else: - WBD = ( + wbd = ( ( - BIM['FloodZone'].startswith('A') - or BIM['FloodZone'].startswith('V') + bim['FloodZone'].startswith('A') + or bim['FloodZone'].startswith('V') ) - and BIM['V_ult'] >= flood_lim - ) or (BIM['V_ult'] >= general_lim) + and bim['V_ult'] >= flood_lim + ) or (bim['V_ult'] >= general_lim) # Terrain # open (0.03) = 3 @@ -374,7 +384,7 @@ def parse_BIM(BIM_in, location, hazards): # https://www.state.nj.us/dep/gis/ # digidownload/metadata/lulc02/anderson2002.html) by T. Wu # group (see internal report on roughness calculations, Table - # 4). These are mapped to Hazus defintions as follows: Open + # 4). These are mapped to Hazus definitions as follows: Open # Water (5400s) with zo=0.01 and barren land (7600) with # zo=0.04 assume Open Open Space Developed, Low Intensity # Developed, Medium Intensity Developed (1110-1140) assumed @@ -388,76 +398,74 @@ def parse_BIM(BIM_in, location, hazards): # Note: HAZUS category of trees (1.00) does not apply to any # LU/LC in NJ terrain = 15 # Default in Reorganized Rulesets - WIND - if location == "NJ": - if BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in [ + if location == 'NJ': + if bim['FloodZone'].startswith('V') or bim['FloodZone'] in { 'A', 'AE', 'A1-30', 'AR', 'A99', - ]: + }: terrain = 3 - elif (BIM['LULC'] >= 5000) and (BIM['LULC'] <= 5999): - terrain = 3 # Open - elif ((BIM['LULC'] == 4400) or (BIM['LULC'] == 6240)) or ( - BIM['LULC'] == 7600 + elif ((bim['LULC'] >= 5000) and (bim['LULC'] <= 5999)) or ( + ((bim['LULC'] == 4400) or (bim['LULC'] == 6240)) + or (bim['LULC'] == 7600) ): terrain = 3 # Open - elif (BIM['LULC'] >= 2000) and (BIM['LULC'] <= 2999): + elif (bim['LULC'] >= 2000) and (bim['LULC'] <= 2999): terrain = 15 # Light suburban - elif ((BIM['LULC'] >= 1110) and (BIM['LULC'] <= 1140)) or ( 
- (BIM['LULC'] >= 6250) and (BIM['LULC'] <= 6252) + elif ((bim['LULC'] >= 1110) and (bim['LULC'] <= 1140)) or ( + (bim['LULC'] >= 6250) and (bim['LULC'] <= 6252) ): terrain = 35 # Suburban - elif ((BIM['LULC'] >= 4100) and (BIM['LULC'] <= 4300)) or ( - BIM['LULC'] == 1600 + elif ((bim['LULC'] >= 4100) and (bim['LULC'] <= 4300)) or ( + bim['LULC'] == 1600 ): terrain = 70 # light trees - elif location == "LA": - if BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in [ + elif location == 'LA': + if bim['FloodZone'].startswith('V') or bim['FloodZone'] in { 'A', 'AE', 'A1-30', 'AR', 'A99', - ]: + }: terrain = 3 - elif (BIM['LULC'] >= 50) and (BIM['LULC'] <= 59): - terrain = 3 # Open - elif ((BIM['LULC'] == 44) or (BIM['LULC'] == 62)) or (BIM['LULC'] == 76): + elif ((bim['LULC'] >= 50) and (bim['LULC'] <= 59)) or ( + ((bim['LULC'] == 44) or (bim['LULC'] == 62)) or (bim['LULC'] == 76) + ): terrain = 3 # Open - elif (BIM['LULC'] >= 20) and (BIM['LULC'] <= 29): + elif (bim['LULC'] >= 20) and (bim['LULC'] <= 29): terrain = 15 # Light suburban - elif (BIM['LULC'] == 11) or (BIM['LULC'] == 61): + elif (bim['LULC'] == 11) or (bim['LULC'] == 61): terrain = 35 # Suburban - elif ((BIM['LULC'] >= 41) and (BIM['LULC'] <= 43)) or ( - BIM['LULC'] in [16, 17] + elif ((bim['LULC'] >= 41) and (bim['LULC'] <= 43)) or ( + bim['LULC'] in {16, 17} ): terrain = 70 # light trees - BIM.update( - dict( + bim.update( + { # Nominal Design Wind Speed # Former term was “Basic Wind Speed”; it is now the “Nominal Design # Wind Speed (V_asd). Unit: mph." 
- V_asd=np.sqrt(0.6 * BIM['V_ult']), - HazardProneRegion=HPR, - WindBorneDebris=WBD, - TerrainRoughness=terrain, - ) + 'V_asd': np.sqrt(0.6 * bim['V_ult']), + 'HazardProneRegion': hpr, + 'WindBorneDebris': wbd, + 'TerrainRoughness': terrain, + } ) if 'inundation' in hazards: - - BIM.update( - dict( + bim.update( + { # Flood Risk # Properties in the High Water Zone (within 1 mile of # the coast) are at risk of flooding and other # wind-borne debris action. - # TODO: need high water zone for this and move it to inputs! - FloodRisk=True, - ) + # TODO: need high water zone for this and move it to inputs! # noqa: TD002 + 'FloodRisk': True, + } ) - return BIM + return bim diff --git a/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py index 3f732bcaa..5b52b7a45 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def CECB_config(BIM): +def CECB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS CECB configuration based on BIM data + Rules to identify a HAZUS CECB configuration based on BIM data. Parameters ---------- @@ -58,26 +57,25 @@ def CECB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -87,54 +85,51 @@ def CECB_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. 
+ elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'C.ECB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'C.ECB.M' else: bldg_tag = 'C.ECB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - Shutters=shutters, - WindowAreaRatio=WWR, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'Shutters': shutters, + 'WindowAreaRatio': wwr, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{wwr}." 
+ f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py index eba699a6d..0b75ebad3 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def CERB_config(BIM): +def CERB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS CERB configuration based on BIM data + Rules to identify a HAZUS CERB configuration based on BIM data. Parameters ---------- @@ -58,26 +57,25 @@ def CERB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -87,54 +85,51 @@ def CERB_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. 
In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.45 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZUS) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'C.ERB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'C.ERB.M' else: bldg_tag = 'C.ERB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - Shutters=shutters, - WindowAreaRatio=WWR, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'Shutters': shutters, + 'WindowAreaRatio': wwr, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{wwr}." 
+ f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py index 9e3245457..bd81df21b 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def HUEFFS_config(BIM): +def HUEFFS_config(bim: dict) -> str: """ - Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data + Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data. Parameters ---------- @@ -59,68 +58,62 @@ def HUEFFS_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + roof_cover = 'spm' if year >= 1975 else 'bur' # Wind debris - WIDD = 'A' + widd = 'A' # Roof deck age - if year >= (datetime.datetime.now().year - 50): - DQ = 'god' # new or average + if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): + dq = 'god' # new or average else: - DQ = 'por' # old + dq = 'por' # old # Metal-RDA if year > 2000: - if BIM['V_ult'] <= 142: - MRDA = 'std' # standard + if bim['V_ult'] <= 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior else: - MRDA = 'std' # standard + mrda = 'std' # standard # Shutters - shutters = int(BIM['WBD']) + shutters = int(bim['WBD']) # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - RoofDeckAge=DQ, - WindDebrisClass=WIDD, - Shutters=shutters, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'RoofDeckAge': dq, + 'WindDebrisClass': widd, + 'Shutters': shutters, + } ) bldg_tag = 'HUEF.FS' - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{shutters}." - f"{WIDD}." - f"{DQ}." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{dq}." + f"{mrda}." + f"{int(bim['TerrainRoughness'])}" ) - return bldg_config - -def HUEFSS_config(BIM): +def HUEFSS_config(bim: dict) -> str: """ - Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data + Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data. Parameters ---------- @@ -130,11 +123,11 @@ def HUEFSS_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -144,54 +137,52 @@ def HUEFSS_config(BIM): roof_cover = 'bur' # Wind debris - WIDD = 'A' + widd = 'A' # Roof deck age - if year >= (datetime.datetime.now().year - 50): - DQ = 'god' # new or average + if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): + dq = 'god' # new or average else: - DQ = 'por' # old + dq = 'por' # old # Metal-RDA if year > 2000: - if BIM['V_ult'] <= 142: - MRDA = 'std' # standard + if bim['V_ult'] <= 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior else: - MRDA = 'std' # standard + mrda = 'std' # standard # Shutters - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - RoofDeckAge=DQ, - WindDebrisClass=WIDD, - Shutters=shutters, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'RoofDeckAge': dq, + 'WindDebrisClass': widd, + 'Shutters': shutters, + } ) bldg_tag = 'HUEF.S.S' - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{DQ}." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{dq}." + f"{mrda}." + f"{int(bim['TerrainRoughness'])}" ) - return bldg_config - -def HUEFH_config(BIM): +def HUEFH_config(bim: dict) -> str: """ - Rules to identify a HAZUS HUEFH configuration based on BIM data + Rules to identify a HAZUS HUEFH configuration based on BIM data. Parameters ---------- @@ -201,11 +192,11 @@ def HUEFH_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -215,52 +206,50 @@ def HUEFH_config(BIM): roof_cover = 'bur' # Wind debris - WIDD = 'A' + widd = 'A' # Shutters - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # Metal-RDA if year > 2000: - if BIM['V_ult'] <= 142: - MRDA = 'std' # standard + if bim['V_ult'] <= 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior else: - MRDA = 'std' # standard + mrda = 'std' # standard - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'HUEF.H.S' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'HUEF.H.M' else: bldg_tag = 'HUEF.H.L' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - WindDebrisClass=WIDD, - Shutters=shutters, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'WindDebrisClass': widd, + 'Shutters': shutters, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." - f"{WIDD}." - f"{MRDA}." + f"{widd}." + f"{mrda}." f"{int(shutters)}." - f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) - return bldg_config - -def HUEFS_config(BIM): +def HUEFS_config(bim: dict) -> str: """ - Rules to identify a HAZUS HUEFS configuration based on BIM data + Rules to identify a HAZUS HUEFS configuration based on BIM data. Parameters ---------- @@ -270,11 +259,11 @@ def HUEFS_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -284,50 +273,43 @@ def HUEFS_config(BIM): roof_cover = 'bur' # Wind debris - WIDD = 'C' + widd = 'C' # Shutters if year > 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - # year <= 2000 - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Metal-RDA if year > 2000: - if BIM['V_ult'] <= 142: - MRDA = 'std' # standard + if bim['V_ult'] <= 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior else: - MRDA = 'std' # standard + mrda = 'std' # standard - if BIM['NumberOfStories'] <= 2: - bldg_tag = 'HUEF.S.M' - else: - bldg_tag = 'HUEF.S.L' + bldg_tag = 'HUEF.S.M' if bim['NumberOfStories'] <= 2 else 'HUEF.S.L' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - WindDebrisClass=WIDD, - Shutters=shutters, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'WindDebrisClass': widd, + 'Shutters': shutters, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." + f"{widd}." f"null." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{mrda}." 
+ f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py index 02e34a1be..42db6dc07 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def MECB_config(BIM): +def MECB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS MECB configuration based on BIM data + Rules to identify a HAZUS MECB configuration based on BIM data. Parameters ---------- @@ -58,88 +57,84 @@ def MECB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' - # no info, using the default supoorted by HAZUS + # no info, using the default supported by HAZUS + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'M.ECB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'M.ECB.M' else: bldg_tag = 'M.ECB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - Shutters=shutters, - WindowAreaRatio=WWR, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + 'WindowAreaRatio': wwr, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{MRDA}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{mrda}." + f"{wwr}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py index ab015762e..4158cd74e 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def MERB_config(BIM): +def MERB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS MERB configuration based on BIM data + Rules to identify a HAZUS MERB configuration based on BIM data. 
Parameters ---------- @@ -58,88 +57,84 @@ def MERB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' - # no info, using the default supoorted by HAZUS + # no info, using the default supported by HAZUS + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] + elif bim['WindBorneDebris']: + shutters = random.random() < 0.45 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. 
Fasteners are to be applied along # the overlap not more than 36 inches on center. - if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'M.ERB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'M.ERB.M' else: bldg_tag = 'M.ERB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentM=MRDA, - Shutters=shutters, - WindowAreaRatio=WWR, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + 'WindowAreaRatio': wwr, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{MRDA}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{mrda}." + f"{wwr}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py index d92004de9..37480fc17 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def MH_config(BIM): +def MH_config(bim: dict) -> str: """ - Rules to identify a HAZUS WSF configuration based on BIM data + Rules to identify a HAZUS WSF configuration based on BIM data. 
Parameters ---------- @@ -58,58 +57,44 @@ def MH_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity if year <= 1976: # MHPHUD bldg_tag = 'MH.PHUD' - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False + shutters = random.random() < 0.45 if bim['WindBorneDebris'] else False # TieDowns - TD = random.random() < 0.45 + tie_downs = random.random() < 0.45 elif year <= 1994: # MH76HUD bldg_tag = 'MH.76HUD' - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False + shutters = random.random() < 0.45 if bim['WindBorneDebris'] else False # TieDowns - TD = random.random() < 0.45 + tie_downs = random.random() < 0.45 else: # MH94HUD I, II, III - if BIM['V_ult'] >= 100.0: - shutters = True - else: - shutters = False + shutters = bim['V_ult'] >= 100.0 # TieDowns - if BIM['V_ult'] >= 70.0: - TD = True - else: - TD = False + tie_downs = bim['V_ult'] >= 70.0 - bldg_tag = 'MH.94HUD' + BIM['WindZone'] + bldg_tag = 'MH.94HUD' + bim['WindZone'] # extend the BIM dictionary - BIM.update( - dict( - TieDowns=TD, - Shutters=shutters, - ) + bim.update( + { + 'TieDowns': tie_downs, + 'Shutters': shutters, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{int(shutters)}." - f"{int(TD)}." - f"{int(BIM['TerrainRoughness'])}" + f"{int(tie_downs)}." 
+ f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py index a09c56cdf..3a46c7199 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import datetime -def MLRI_config(BIM): +def MLRI_config(bim: dict) -> str: """ - Rules to identify a HAZUS MLRI configuration based on BIM data + Rules to identify a HAZUS MLRI configuration based on BIM data. Parameters ---------- @@ -58,14 +57,14 @@ def MLRI_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # MR - MR = True + mr = True # Shutters shutters = False @@ -75,49 +74,50 @@ def MLRI_config(BIM): # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'null' roof_quality = 'god' # default supported by HAZUS + elif year >= 1975: + roof_cover = 'spm' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 35 + ): + roof_quality = 'god' + else: + roof_quality = 'por' else: - if year >= 1975: - roof_cover = 'spm' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): - roof_quality = 'god' - else: - roof_quality = 'por' + # year < 1975 + roof_cover = 'bur' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 30 + ): + roof_quality = 'god' else: - # year < 1975 - roof_cover = 'bur' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): - roof_quality = 'god' - else: - roof_quality = 'por' + roof_quality = 'por' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofQuality=roof_quality, - RoofDeckAttachmentM=MRDA, - Shutters=shutters, - MasonryReinforcing=MR, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofQuality': roof_quality, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + 'MasonryReinforcing': mr, + } ) - bldg_config = ( + return ( f"M.LRI." f"{int(shutters)}." - f"{int(MR)}." + f"{int(mr)}." f"{roof_quality}." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{mrda}." 
+ f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py index 3b5e6246e..8354e8b30 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def MLRM_config(BIM): +def MLRM_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS MLRM configuration based on BIM data + Rules to identify a HAZUS MLRM configuration based on BIM data. Parameters ---------- @@ -59,17 +58,17 @@ def MLRM_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Note the only roof option for commercial masonry in NJ appraisers manual # is OSWJ, so this suggests they do not even see alternate roof system # ref: Custom Inventory google spreadsheet H-37 10/01/20 # This could be commented for other regions if detailed data are available - BIM['RoofSystem'] = 'ows' + bim['RoofSystem'] = 'ows' # Roof cover # Roof cover does not apply to gable and hip roofs @@ -90,106 +89,104 @@ def MLRM_config(BIM): # surrounding the opening, and the attachments are resistant to # corrosion and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
- shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # Masonry Reinforcing (MR) # R606.6.4.1.2 Metal Reinforcement states that walls other than # interior non-load-bearing walls shall be anchored at vertical # intervals of not more than 8 inches with joint reinforcement of - # not less than 9 gage. Therefore this ruleset assumes that all + # not less than 9-gage. Therefore this ruleset assumes that all # exterior or load-bearing masonry walls will have # reinforcement. Since our considerations deal with wind speed, I # made the assumption that only exterior walls are being taken # into consideration. - MR = True + mr = True # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm - if BIM['RoofSystem'] == 'ows': + if bim['RoofSystem'] == 'ows': # RDA - RDA = 'null' # Doesn't apply to OWSJ + rda = 'null' # Doesn't apply to OWSJ # Roof deck age (DQ) # Average lifespan of a steel joist roof is roughly 50 years according # to the source below. Therefore, if constructed 50 years before the # current year, the roof deck should be considered old. 
# https://www.metalroofing.systems/metal-roofing-pros-cons/ - if year >= (datetime.datetime.now().year - 50): - DQ = 'god' # new or average + if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): + dq = 'god' # new or average else: - DQ = 'por' # old + dq = 'por' # old # RWC - RWC = 'null' # Doesn't apply to OWSJ + rwc = 'null' # Doesn't apply to OWSJ # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. - if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior - elif BIM['RoofSystem'] == 'trs': + elif bim['RoofSystem'] == 'trs': # This clause should not be activated for NJ # RDA - if BIM['TerrainRoughness'] >= 35: # suburban or light trees - if BIM['V_ult'] > 130.0: - RDA = '8s' # 8d @ 6"/6" 'D' + if bim['TerrainRoughness'] >= 35: # suburban or light trees + if bim['V_ult'] > 130.0: + rda = '8s' # 8d @ 6"/6" 'D' else: - RDA = '8d' # 8d @ 6"/12" 'B' - else: # light suburban or open - if BIM['V_ult'] > 110.0: - RDA = '8s' # 8d @ 6"/6" 'D' - else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' + elif bim['V_ult'] > 110.0: + rda = '8s' # 8d @ 6"/6" 'D' + else: + rda = '8d' # 8d @ 6"/12" 'B' # Metal RDA - MRDA = 'null' # Doesn't apply to Wood Truss + mrda = 'null' # Doesn't apply to Wood Truss # Roof deck agea (DQ) - DQ = 'null' # Doesn't apply to Wood Truss + dq = 'null' # Doesn't apply to Wood Truss # RWC - if BIM['V_ult'] > 110: - RWC = 'strap' # Strap + if bim['V_ult'] > 110: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # 
Toe-nail # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False - if BIM['MeanRoofHt'] < 15.0: + if bim['MeanRoofHt'] < 15.0: # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentW=RDA, - RoofDeckAttachmentM=MRDA, - RoofDeckAge=DQ, - RoofToWallConnection=RWC, - Shutters=shutters, - MasonryReinforcing=MR, - WindowAreaRatio=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentW': rda, + 'RoofDeckAttachmentM': mrda, + 'RoofDeckAge': dq, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'MasonryReinforcing': mr, + 'WindowAreaRatio': widd, + } ) # if it's MLRM1, configure outputs @@ -197,24 +194,24 @@ def MLRM_config(BIM): f"M.LRM.1." f"{roof_cover}." f"{int(shutters)}." - f"{int(MR)}." - f"{WIDD}." - f"{BIM['RoofSystem']}." - f"{RDA}." - f"{RWC}." - f"{DQ}." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{int(mr)}." + f"{widd}." + f"{bim['RoofSystem']}." + f"{rda}." + f"{rwc}." + f"{dq}." + f"{mrda}." 
+ f"{int(bim['TerrainRoughness'])}" ) else: unit_tag = 'null' # MLRM2 needs more rulesets - if BIM['RoofSystem'] == 'trs': - joist_spacing = 'null' - elif BIM['RoofSystem'] == 'ows': - if BIM['NumberOfUnits'] == 1: + if bim['RoofSystem'] == 'trs': + joist_spacing: int | str = 'null' + elif bim['RoofSystem'] == 'ows': + if bim['NumberOfUnits'] == 1: joist_spacing = 'null' unit_tag = 'sgl' else: @@ -222,34 +219,34 @@ def MLRM_config(BIM): unit_tag = 'mlt' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - RoofDeckAttachmentW=RDA, - RoofDeckAttachmentM=MRDA, - RoofDeckAge=DQ, - RoofToWallConnection=RWC, - Shutters=shutters, - MasonryReinforcing=MR, - WindDebrisClass=WIDD, - UnitType=unit_tag, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'RoofDeckAttachmentW': rda, + 'RoofDeckAttachmentM': mrda, + 'RoofDeckAge': dq, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'MasonryReinforcing': mr, + 'WindDebrisClass': widd, + 'UnitType': unit_tag, + } ) bldg_config = ( f"M.LRM.2." f"{roof_cover}." f"{int(shutters)}." - f"{int(MR)}." - f"{WIDD}." - f"{BIM['RoofSystem']}." - f"{RDA}." - f"{RWC}." - f"{DQ}." - f"{MRDA}." + f"{int(mr)}." + f"{widd}." + f"{bim['RoofSystem']}." + f"{rda}." + f"{rwc}." + f"{dq}." + f"{mrda}." f"{unit_tag}." f"{joist_spacing}." 
- f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py index 556dfe16d..83ded54f1 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def MMUH_config(BIM): +def MMUH_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS MMUH configuration based on BIM data + Rules to identify a HAZUS MMUH configuration based on BIM data. Parameters ---------- @@ -59,25 +58,25 @@ def MMUH_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
- SWR = "null" # Default - if BIM['RoofShape'] == 'flt': - SWR = 'null' - elif BIM['RoofShape'] in ['hip', 'gab']: - SWR = int(random.random() < 0.6) + swr: int | str = 'null' # Default + if bim['RoofShape'] == 'flt': + swr = 'null' + elif bim['RoofShape'] in {'hip', 'gab'}: + swr = int(random.random() < 0.6) # Roof cover & Roof quality # Roof cover and quality do not apply to gable and hip roofs - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'null' roof_quality = 'null' @@ -94,26 +93,29 @@ def MMUH_config(BIM): # We assume that all flat roofs built before 1975 are BURs and all roofs # built after 1975 are SPMs. # Nothing in NJ Building Code or in the Hazus manual specifies what - # constitutes “good” and “poor” roof conditions, so ruleset is dependant + # constitutes “good” and “poor” roof conditions, so ruleset is dependent # on the age of the roof and average lifespan of BUR and SPM roofs. # We assume that the average lifespan of a BUR roof is 30 years and the # average lifespan of a SPM is 35 years. Therefore, BURs installed before # 1990 are in poor condition, and SPMs installed before 1985 are in poor # condition. 
+ elif year >= 1975: + roof_cover = 'spm' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 35 + ): + roof_quality = 'god' + else: + roof_quality = 'por' else: - if year >= 1975: - roof_cover = 'spm' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): - roof_quality = 'god' - else: - roof_quality = 'por' + # year < 1975 + roof_cover = 'bur' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 30 + ): + roof_quality = 'god' else: - # year < 1975 - roof_cover = 'bur' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): - roof_quality = 'god' - else: - roof_quality = 'por' + roof_quality = 'por' # Roof Deck Attachment (RDA) # IRC 2009-2015: @@ -128,22 +130,21 @@ def MMUH_config(BIM): # roughness length in the ruleset herein. # The base rule was then extended to the exposures closest to suburban and # light suburban, even though these are not considered by the code. - if BIM['TerrainRoughness'] >= 35: # suburban or light trees - if BIM['V_ult'] > 130.0: - RDA = '8s' # 8d @ 6"/6" 'D' + if bim['TerrainRoughness'] >= 35: # suburban or light trees + if bim['V_ult'] > 130.0: + rda = '8s' # 8d @ 6"/6" 'D' else: - RDA = '8d' # 8d @ 6"/12" 'B' - else: # light suburban or open - if BIM['V_ult'] > 110.0: - RDA = '8s' # 8d @ 6"/6" 'D' - else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' + elif bim['V_ult'] > 110.0: + rda = '8s' # 8d @ 6"/6" 'D' + else: + rda = '8d' # 8d @ 6"/12" 'B' # Roof-Wall Connection (RWC) - if BIM['V_ult'] > 110.0: - RWC = 'strap' # Strap + if bim['V_ult'] > 110.0: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: @@ -157,7 +158,7 @@ def MMUH_config(BIM): # corrosion and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -167,49 +168,46 @@ def MMUH_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Masonry Reinforcing (MR) # R606.6.4.1.2 Metal Reinforcement states that walls other than interior # non-load-bearing walls shall be anchored at vertical intervals of not - # more than 8 inches with joint reinforcement of not less than 9 gage. + # more than 8 inches with joint reinforcement of not less than 9-gage. # Therefore this ruleset assumes that all exterior or load-bearing masonry # walls will have reinforcement. Since our considerations deal with wind # speed, I made the assumption that only exterior walls are being taken # into consideration. - MR = True + mr = True - stories = min(BIM['NumberOfStories'], 3) + stories = min(bim['NumberOfStories'], 3) # extend the BIM dictionary - BIM.update( - dict( - SecondaryWaterResistance=SWR, - RoofCover=roof_cover, - RoofQuality=roof_quality, - RoofDeckAttachmentW=RDA, - RoofToWallConnection=RWC, - Shutters=shutters, - MasonryReinforcing=MR, - ) + bim.update( + { + 'SecondaryWaterResistance': swr, + 'RoofCover': roof_cover, + 'RoofQuality': roof_quality, + 'RoofDeckAttachmentW': rda, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'MasonryReinforcing': mr, + } ) - bldg_config = ( + return ( f"M.MUH." f"{int(stories)}." - f"{BIM['RoofShape']}." - f"{int(SWR)}." + f"{bim['RoofShape']}." + f"{int(swr)}." 
f"{roof_cover}." f"{roof_quality}." - f"{RDA}." - f"{RWC}." + f"{rda}." + f"{rwc}." f"{int(shutters)}." - f"{int(MR)}." - f"{int(BIM['TerrainRoughness'])}" + f"{int(mr)}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py index b8c8e1fbd..bb538715b 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def MSF_config(BIM): +def MSF_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS MSF configuration based on BIM data + Rules to identify a HAZUS MSF configuration based on BIM data. Parameters ---------- @@ -59,23 +58,23 @@ def MSF_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof-Wall Connection (RWC) - if BIM['HazardProneRegion']: - RWC = 'strap' # Strap + if bim['HazardProneRegion']: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # Roof Frame Type - RFT = BIM['RoofSystem'] + rft = bim['RoofSystem'] # Story Flag - stories = min(BIM['NumberOfStories'], 2) + stories = min(bim['NumberOfStories'], 2) # Shutters # IRC 2000-2015: @@ -89,7 +88,7 @@ def MSF_config(BIM): # corrosion and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -99,14 +98,12 @@ def MSF_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.45 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False - - if BIM['RoofSystem'] == 'trs': + shutters = False + if bim['RoofSystem'] == 'trs': # Roof Deck Attachment (RDA) # IRC codes: # NJ code requires 8d nails (with spacing 6”/12”) for sheathing thicknesses @@ -115,22 +112,22 @@ def MSF_config(BIM): # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6” # spacing (enhanced roof spacing) for ultimate wind speeds greater than # a speed_lim. speed_lim depends on the year of construction - RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND + rda = '6d' # Default (aka A) in Reorganized Rulesets - WIND if year >= 2016: # IRC 2015 speed_lim = 130.0 # mph else: # IRC 2000 - 2009 speed_lim = 100.0 # mph - if BIM['V_ult'] > speed_lim: - RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) + if bim['V_ult'] > speed_lim: + rda = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) else: - RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
- SWR = random.random() < 0.6 + swr: int | float | str = random.random() < 0.6 # Garage # As per IRC 2015: @@ -147,65 +144,60 @@ def MSF_config(BIM): # (and therefore do not have any strength requirements) that are older than # 30 years are considered to be weak, whereas those from the last 30 years # are considered to be standard. - if BIM['Garage'] == -1: + if bim['Garage'] == -1: # no garage data, using the default "none" garage = 'no' - else: - if year > (datetime.datetime.now().year - 30): - if BIM['Garage'] < 1: - garage = 'no' # None - else: - if shutters: - garage = 'sup' # SFBC 1994 - else: - garage = 'std' # Standard + elif year > (datetime.datetime.now(tz=datetime.timezone.utc).year - 30): + if bim['Garage'] < 1: + garage = 'no' # None + elif shutters: + garage = 'sup' # SFBC 1994 else: - # year <= current year - 30 - if BIM['Garage'] < 1: - garage = 'no' # None - else: - if shutters: - garage = 'sup' - else: - garage = 'wkd' # Weak + garage = 'std' # Standard + elif bim['Garage'] < 1: + garage = 'no' # None + elif shutters: + garage = 'sup' + else: + garage = 'wkd' # Weak # Masonry Reinforcing (MR) # R606.6.4.1.2 Metal Reinforcement states that walls other than interior # non-load-bearing walls shall be anchored at vertical intervals of not - # more than 8 inches with joint reinforcement of not less than 9 gage. + # more than 8 inches with joint reinforcement of not less than 9-gage. # Therefore this ruleset assumes that all exterior or load-bearing masonry # walls will have reinforcement. Since our considerations deal with wind # speed, I made the assumption that only exterior walls are being taken # into consideration. 
- MR = True + mr = True - stories = min(BIM['NumberOfStories'], 2) + stories = min(bim['NumberOfStories'], 2) # extend the BIM dictionary - BIM.update( - dict( - SecondaryWaterResistance=SWR, - RoofDeckAttachmentW=RDA, - RoofToWallConnection=RWC, - Shutters=shutters, - AugmentGarage=garage, - MasonryReinforcing=MR, - ) + bim.update( + { + 'SecondaryWaterResistance': swr, + 'RoofDeckAttachmentW': rda, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'AugmentGarage': garage, + 'MasonryReinforcing': mr, + } ) bldg_config = ( f"M.SF." f"{int(stories)}." - f"{BIM['RoofShape']}." - f"{RWC}." - f"{RFT}." - f"{RDA}." + f"{bim['RoofShape']}." + f"{rwc}." + f"{rft}." + f"{rda}." f"{int(shutters)}." - f"{int(SWR)}." + f"{int(swr)}." f"{garage}." - f"{int(MR)}." + f"{int(mr)}." f"null." - f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) else: @@ -213,7 +205,7 @@ def MSF_config(BIM): # r # A 2015 study found that there were 750,000 metal roof installed in 2015, # out of 5 million new roofs in the US annually. If these numbers stay - # relatively stable, that implies that roughtly 15% of roofs are smlt. + # relatively stable, that implies that roughly 15% of roofs are smlt. # ref. link: https://www.bdcnetwork.com/blog/metal-roofs-are-soaring- # popularity-residential-marmet roof_cover_options = ['smtl', 'cshl'] @@ -224,51 +216,54 @@ def MSF_config(BIM): # high wind attachments are required for DSWII > 142 mph # NJ IBC 1507.4.5 (for smtl) # high wind attachment are required for DSWII > 142 mph - if BIM['V_ult'] > 142.0: - RDA = 'sup' # superior + if bim['V_ult'] > 142.0: + rda = 'sup' # superior else: - RDA = 'std' # standard + rda = 'std' # standard # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
- SWR = 'null' # Default - if BIM['RoofShape'] == 'flt': - SWR = int(True) + + # Default + swr = 'null' # type: ignore[no-redef] + + if bim['RoofShape'] == 'flt': + swr = int(True) # type: ignore[assignment] elif ( - (BIM['RoofShape'] in ['hip', 'gab']) + (bim['RoofShape'] in {'hip', 'gab'}) and (roof_cover == 'cshl') - and (RDA == 'sup') + and (rda == 'sup') ): - SWR = int(random.random() < 0.6) + swr = int(random.random() < 0.6) - stories = min(BIM['NumberOfStories'], 2) + stories = min(bim['NumberOfStories'], 2) # extend the BIM dictionary - BIM.update( - dict( - SecondaryWaterResistance=SWR, - RoofDeckAttachmentW=RDA, - RoofToWallConnection=RWC, - Shutters=shutters, - AugmentGarage=garage, - MasonryReinforcing=MR, - ) + bim.update( + { + 'SecondaryWaterResistance': swr, + 'RoofDeckAttachmentW': rda, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'AugmentGarage': garage, # type: ignore[used-before-def] + 'MasonryReinforcing': mr, # type: ignore[used-before-def] + } ) bldg_config = ( f"M.SF." f"{int(stories)}." - f"{BIM['RoofShape']}." - f"{RWC}." - f"{RFT}." - f"{RDA}." + f"{bim['RoofShape']}." + f"{rwc}." + f"{rft}." + f"{rda}." f"{int(shutters)}." - f"{SWR}." + f"{swr}." f"null." f"null." f"{roof_cover}." 
- f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py index 0ea56b739..af608140c 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,11 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa +from __future__ import annotations + import numpy as np import pandas as pd -def parse_BIM(BIM_in, location, hazards): +def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: C901, PLR0912 """ Parses the information provided in the AIM model. @@ -81,24 +82,23 @@ def parse_BIM(BIM_in, location, hazards): ------- BIM_ap: dictionary Parsed building characteristics. 
- """ + """ # check location - if location not in ['LA', 'NJ']: - print(f'WARNING: The provided location is not recognized: {location}') + if location not in {'LA', 'NJ'}: + print(f'WARNING: The provided location is not recognized: {location}') # noqa: T201 # check hazard for hazard in hazards: - if hazard not in ['wind', 'inundation']: - print(f'WARNING: The provided hazard is not recognized: {hazard}') + if hazard not in {'wind', 'inundation'}: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 # initialize the BIM dict - BIM_ap = BIM_in.copy() + bim_ap = bim_in.copy() if 'wind' in hazards: - # maps roof type to the internal representation - ap_RoofType = { + ap_roof_type = { 'hip': 'hip', 'hipped': 'hip', 'Hip': 'hip', @@ -109,8 +109,8 @@ def parse_BIM(BIM_in, location, hazards): 'Flat': 'flt', } # maps roof system to the internal representation - ap_RoofSyste = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} - roof_system = BIM_in.get('RoofSystem', 'Wood') + ap_roof_syste = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} + roof_system = bim_in.get('RoofSystem', 'Wood') if pd.isna(roof_system): roof_system = 'Wood' @@ -125,8 +125,8 @@ def parse_BIM(BIM_in, location, hazards): # maps for design level (Marginal Engineered is mapped to # Engineered as default) - ap_DesignLevel = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} - design_level = BIM_in.get('DesignLevel', 'E') + ap_design_level = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} + design_level = bim_in.get('DesignLevel', 'E') if pd.isna(design_level): design_level = 'E' @@ -137,11 +137,11 @@ def parse_BIM(BIM_in, location, hazards): alname_yearbuilt = ['YearBuiltNJDEP', 'yearBuilt', 'YearBuiltMODIV'] yearbuilt = None try: - yearbuilt = BIM_in['YearBuilt'] + yearbuilt = bim_in['YearBuilt'] except KeyError: for i in alname_yearbuilt: - if i in BIM_in.keys(): - yearbuilt = BIM_in[i] + if i in bim_in: + yearbuilt = bim_in[i] break # if none of the above works, set a default @@ -157,11 
+157,11 @@ def parse_BIM(BIM_in, location, hazards): ] nstories = None try: - nstories = BIM_in['NumberOfStories'] - except Exception as e: + nstories = bim_in['NumberOfStories'] + except KeyError as e: for i in alname_nstories: - if i in BIM_in.keys(): - nstories = BIM_in[i] + if i in bim_in: + nstories = bim_in[i] break # if none of the above works, we need to raise an exception @@ -172,11 +172,11 @@ def parse_BIM(BIM_in, location, hazards): alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0'] area = None try: - area = BIM_in['PlanArea'] - except Exception as e: + area = bim_in['PlanArea'] + except KeyError as e: for i in alname_area: - if i in BIM_in.keys(): - area = BIM_in[i] + if i in bim_in: + area = bim_in[i] break # if none of the above works, we need to raise an exception @@ -186,21 +186,21 @@ def parse_BIM(BIM_in, location, hazards): # Design Wind Speed alname_dws = ['DSWII', 'DWSII', 'DesignWindSpeed'] - dws = BIM_in.get('DesignWindSpeed', None) + dws = bim_in.get('DesignWindSpeed') if dws is None: for alname in alname_dws: - if alname in BIM_in.keys(): - dws = BIM_in[alname] + if alname in bim_in: + dws = bim_in[alname] break alname_occupancy = ['occupancy'] oc = None try: - oc = BIM_in['OccupancyClass'] - except Exception as e: + oc = bim_in['OccupancyClass'] + except KeyError as e: for i in alname_occupancy: - if i in BIM_in.keys(): - oc = BIM_in[i] + if i in bim_in: + oc = bim_in[i] break # if none of the above works, we need to raise an exception @@ -212,7 +212,7 @@ def parse_BIM(BIM_in, location, hazards): oc = 'RES3A' # maps for flood zone - ap_FloodZone = { + ap_flood_zone = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 
6101: 'VE', @@ -232,15 +232,15 @@ def parse_BIM(BIM_in, location, hazards): 6115: 'NA', 6119: 'NA', } - if isinstance(BIM_in['FloodZone'], int): + if isinstance(bim_in['FloodZone'], int): # NJDEP code for flood zone (conversion to the FEMA designations) - floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] + floodzone_fema = ap_flood_zone[bim_in['FloodZone']] else: # standard input should follow the FEMA flood zone designations - floodzone_fema = BIM_in['FloodZone'] + floodzone_fema = bim_in['FloodZone'] # maps for BuildingType - ap_BuildingType_NJ = { + ap_building_type_nj = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 3001: 'Wood', @@ -251,61 +251,62 @@ def parse_BIM(BIM_in, location, hazards): } if location == 'NJ': # NJDEP code for flood zone needs to be converted - buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']] + buildingtype = ap_building_type_nj[bim_in['BuildingType']] elif location == 'LA': # standard input should provide the building type as a string - buildingtype = BIM_in['BuildingType'] + buildingtype = bim_in['BuildingType'] # first, pull in the provided data - BIM_ap.update( - dict( - OccupancyClass=str(oc), - BuildingType=buildingtype, - YearBuilt=int(yearbuilt), + bim_ap.update( + { + 'OccupancyClass': str(oc), + 'BuildingType': buildingtype, + 'YearBuilt': int(yearbuilt), # double check with Tracy for format - (NumberStories0 # is 4-digit code) # (NumberStories1 is image-processed story number) - NumberOfStories=int(nstories), - PlanArea=float(area), - FloodZone=floodzone_fema, - V_ult=float(dws), - AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp', 'Below')], - RoofShape=ap_RoofType[BIM_in['RoofShape']], - RoofSlope=float(BIM_in.get('RoofSlope', 0.25)), # default 0.25 - SheathingThickness=float( - BIM_in.get('SheathingThick', 1.0) + 'NumberOfStories': int(nstories), + 'PlanArea': float(area), + 'FloodZone': floodzone_fema, + 'V_ult': float(dws), + 'AvgJanTemp': 
ap_ajt[bim_in.get('AvgJanTemp', 'Below')], + 'RoofShape': ap_roof_type[bim_in['RoofShape']], + 'RoofSlope': float(bim_in.get('RoofSlope', 0.25)), # default 0.25 + 'SheathingThickness': float( + bim_in.get('SheathingThick', 1.0) ), # default 1.0 - RoofSystem=str( - ap_RoofSyste[roof_system] + 'RoofSystem': str( + ap_roof_syste[roof_system] ), # only valid for masonry structures - Garage=float(BIM_in.get('Garage', -1.0)), - LULC=BIM_in.get('LULC', -1), - z0=float( - BIM_in.get('z0', -1) + 'Garage': float(bim_in.get('Garage', -1.0)), + 'LULC': bim_in.get('LULC', -1), + 'z0': float( + bim_in.get('z0', -1) ), # if the z0 is already in the input file - Terrain=BIM_in.get('Terrain', -1), - MeanRoofHt=float(BIM_in.get('MeanRoofHt', 15.0)), # default 15 - DesignLevel=str(ap_DesignLevel[design_level]), # default engineered - WindowArea=float(BIM_in.get('WindowArea', 0.20)), - WoodZone=str(BIM_in.get('WindZone', 'I')), - ) + 'Terrain': bim_in.get('Terrain', -1), + 'MeanRoofHt': float(bim_in.get('MeanRoofHt', 15.0)), # default 15 + 'DesignLevel': str( + ap_design_level[design_level] + ), # default engineered + 'WindowArea': float(bim_in.get('WindowArea', 0.20)), + 'WoodZone': str(bim_in.get('WindZone', 'I')), + } ) if 'inundation' in hazards: - # maps for split level - ap_SplitLevel = {'NO': 0, 'YES': 1} + ap_split_level = {'NO': 0, 'YES': 1} - foundation = BIM_in.get('FoundationType', 3501) + foundation = bim_in.get('FoundationType', 3501) if pd.isna(foundation): foundation = 3501 - nunits = BIM_in.get('NoUnits', 1) + nunits = bim_in.get('NoUnits', 1) if pd.isna(nunits): nunits = 1 # maps for flood zone - ap_FloodZone = { + ap_flood_zone = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 
6101: 'VE', @@ -325,42 +326,43 @@ def parse_BIM(BIM_in, location, hazards): 6115: 'NA', 6119: 'NA', } - if isinstance(BIM_in['FloodZone'], int): + if isinstance(bim_in['FloodZone'], int): # NJDEP code for flood zone (conversion to the FEMA designations) - floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] + floodzone_fema = ap_flood_zone[bim_in['FloodZone']] else: # standard input should follow the FEMA flood zone designations - floodzone_fema = BIM_in['FloodZone'] + floodzone_fema = bim_in['FloodZone'] # add the parsed data to the BIM dict - BIM_ap.update( - dict( - DesignLevel=str(ap_DesignLevel[design_level]), # default engineered - NumberOfUnits=int(nunits), - FirstFloorElevation=float(BIM_in.get('FirstFloorHt1', 10.0)), - SplitLevel=bool( - ap_SplitLevel[BIM_in.get('SplitLevel', 'NO')] + bim_ap.update( + { + 'DesignLevel': str( + ap_design_level[design_level] + ), # default engineered + 'NumberOfUnits': int(nunits), + 'FirstFloorElevation': float(bim_in.get('FirstFloorHt1', 10.0)), + 'SplitLevel': bool( + ap_split_level[bim_in.get('SplitLevel', 'NO')] ), # dfault: no - FoundationType=int(foundation), # default: pile - City=BIM_in.get('City', 'NA'), - FloodZone=str(floodzone_fema), - ) + 'FoundationType': int(foundation), # default: pile + 'City': bim_in.get('City', 'NA'), + 'FloodZone': str(floodzone_fema), + } ) # add inferred, generic meta-variables if 'wind' in hazards: - # Hurricane-Prone Region (HRP) # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is # greater than a pre-defined limit. 
- if BIM_ap['YearBuilt'] >= 2016: + if bim_ap['YearBuilt'] >= 2016: # The limit is 115 mph in IRC 2015 - HPR = BIM_ap['V_ult'] > 115.0 + hpr = bim_ap['V_ult'] > 115.0 else: # The limit is 90 mph in IRC 2009 and earlier versions - HPR = BIM_ap['V_ult'] > 90.0 + hpr = bim_ap['V_ult'] > 90.0 # Wind Borne Debris # Areas within hurricane-prone regions are affected by debris if one of @@ -370,7 +372,7 @@ def parse_BIM(BIM_in, location, hazards): # (2) In areas where the ultimate design wind speed is greater than # general_lim # The flood_lim and general_lim limits depend on the year of construction - if BIM_ap['YearBuilt'] >= 2016: + if bim_ap['YearBuilt'] >= 2016: # In IRC 2015: flood_lim = 130.0 # mph general_lim = 140.0 # mph @@ -384,16 +386,16 @@ def parse_BIM(BIM_in, location, hazards): # where the ultimate design wind speed is 130 mph (58m/s) or greater. # (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s) # or greater. (Definitions: Chapter 2, 2015 NJ Residential Code) - if not HPR: - WBD = False + if not hpr: + wbd = False else: - WBD = ( + wbd = ( ( - BIM_ap['FloodZone'].startswith('A') - or BIM_ap['FloodZone'].startswith('V') + bim_ap['FloodZone'].startswith('A') + or bim_ap['FloodZone'].startswith('V') ) - and BIM_ap['V_ult'] >= flood_lim - ) or (BIM_ap['V_ult'] >= general_lim) + and bim_ap['V_ult'] >= flood_lim + ) or (bim_ap['V_ult'] >= general_lim) # Terrain # open (0.03) = 3 @@ -404,7 +406,7 @@ def parse_BIM(BIM_in, location, hazards): # Mapped to Land Use Categories in NJ (see https://www.state.nj.us/dep/gis/ # digidownload/metadata/lulc02/anderson2002.html) by T. Wu group # (see internal report on roughness calculations, Table 4). 
- # These are mapped to Hazus defintions as follows: + # These are mapped to Hazus definitions as follows: # Open Water (5400s) with zo=0.01 and barren land (7600) with # zo=0.04 assume Open Open Space Developed, Low Intensity # Developed, Medium Intensity Developed (1110-1140) assumed @@ -417,74 +419,74 @@ def parse_BIM(BIM_in, location, hazards): # Herbaceous Wetlands (6240) with zo=0.03 assume Open # Note: HAZUS category of trees (1.00) does not apply to any LU/LC in NJ terrain = 15 # Default in Reorganized Rulesets - WIND - LULC = BIM_ap['LULC'] - TER = BIM_ap['Terrain'] - if BIM_ap['z0'] > 0: - terrain = int(100 * BIM_ap['z0']) - elif LULC > 0: - if BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in [ + lulc = bim_ap['LULC'] + terrain = bim_ap['Terrain'] + if bim_ap['z0'] > 0: + terrain = int(100 * bim_ap['z0']) + elif lulc > 0: + if bim_ap['FloodZone'].startswith('V') or bim_ap['FloodZone'] in { 'A', 'AE', 'A1-30', 'AR', 'A99', - ]: + }: terrain = 3 - elif (LULC >= 5000) and (LULC <= 5999): - terrain = 3 # Open - elif ((LULC == 4400) or (LULC == 6240)) or (LULC == 7600): + elif ((lulc >= 5000) and (lulc <= 5999)) or ( + (lulc in {4400, 6240}) or (lulc == 7600) + ): terrain = 3 # Open - elif (LULC >= 2000) and (LULC <= 2999): + elif (lulc >= 2000) and (lulc <= 2999): terrain = 15 # Light suburban - elif ((LULC >= 1110) and (LULC <= 1140)) or ( - (LULC >= 6250) and (LULC <= 6252) + elif ((lulc >= 1110) and (lulc <= 1140)) or ( + (lulc >= 6250) and (lulc <= 6252) ): terrain = 35 # Suburban - elif ((LULC >= 4100) and (LULC <= 4300)) or (LULC == 1600): + elif ((lulc >= 4100) and (lulc <= 4300)) or (lulc == 1600): terrain = 70 # light trees - elif TER > 0: - if BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in [ + elif terrain > 0: + if bim_ap['FloodZone'].startswith('V') or bim_ap['FloodZone'] in { 'A', 'AE', 'A1-30', 'AR', 'A99', - ]: + }: terrain = 3 - elif (TER >= 50) and (TER <= 59): - terrain = 3 # Open - elif ((TER == 44) or (TER == 
62)) or (TER == 76): + elif ((terrain >= 50) and (terrain <= 59)) or ( + (terrain in {44, 62}) or (terrain == 76) + ): terrain = 3 # Open - elif (TER >= 20) and (TER <= 29): + elif (terrain >= 20) and (terrain <= 29): terrain = 15 # Light suburban - elif (TER == 11) or (TER == 61): + elif terrain in {11, 61}: terrain = 35 # Suburban - elif ((TER >= 41) and (TER <= 43)) or (TER in [16, 17]): + elif ((terrain >= 41) and (terrain <= 43)) or (terrain in {16, 17}): terrain = 70 # light trees - BIM_ap.update( - dict( + bim_ap.update( + { # Nominal Design Wind Speed # Former term was “Basic Wind Speed”; it is now the “Nominal Design # Wind Speed (V_asd). Unit: mph." - V_asd=np.sqrt(0.6 * BIM_ap['V_ult']), - HazardProneRegion=HPR, - WindBorneDebris=WBD, - TerrainRoughness=terrain, - ) + 'V_asd': np.sqrt(0.6 * bim_ap['V_ult']), + 'HazardProneRegion': hpr, + 'WindBorneDebris': wbd, + 'TerrainRoughness': terrain, + } ) if 'inundation' in hazards: - - BIM_ap.update( - dict( + bim_ap.update( + { # Flood Risk # Properties in the High Water Zone (within 1 mile of # the coast) are at risk of flooding and other # wind-borne debris action. - # TODO: need high water zone for this and move it to inputs! - FloodRisk=True, - ) + # TODO: need high water zone for this and move it to # noqa: TD002 + # inputs! 
+ 'FloodRisk': True, + } ) - return BIM_ap + return bim_ap diff --git a/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py index db68ad03f..b95effa50 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def SECB_config(BIM): +def SECB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS SECB configuration based on BIM data + Rules to identify a HAZUS SECB configuration based on BIM data. Parameters ---------- @@ -58,26 +57,25 @@ def SECB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling + A string that identifies a specific configuration within this building class. - """ - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -87,68 +85,65 @@ def SECB_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. 
In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'S.ECB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'S.ECB.M' else: bldg_tag = 'S.ECB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - WindowAreaRatio=WWR, - RoofDeckAttachmentM=MRDA, - Shutters=shutters, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'WindowAreaRatio': wwr, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{MRDA}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{mrda}." + f"{wwr}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py index 6c078dd15..942fd7a7b 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -46,9 +45,9 @@ import random -def SERB_config(BIM): +def SERB_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS SERB configuration based on BIM data + Rules to identify a HAZUS SERB configuration based on BIM data. Parameters ---------- @@ -58,26 +57,25 @@ def SERB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling + A string that identifies a specific configuration within this building class. 
- """ - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof cover - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur + elif year >= 1975: + roof_cover = 'spm' else: - if year >= 1975: - roof_cover = 'spm' - else: - # year < 1975 - roof_cover = 'bur' + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -87,68 +85,65 @@ def SERB_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. 
+ elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - WIDD = 'C' # residential (default) - if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D']: - WIDD = 'C' # residential - elif BIM['OccupancyClass'] == 'AGR1': - WIDD = 'D' # None + widd = 'C' # residential (default) + if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: + widd = 'C' # residential + elif bim['OccupancyClass'] == 'AGR1': + widd = 'D' # None else: - WIDD = 'A' # Res/Comm + widd = 'A' # Res/Comm # Window area ratio - if BIM['WindowArea'] < 0.33: - WWR = 'low' - elif BIM['WindowArea'] < 0.5: - WWR = 'med' + if bim['WindowArea'] < 0.33: + wwr = 'low' + elif bim['WindowArea'] < 0.5: + wwr = 'med' else: - WWR = 'hig' + wwr = 'hig' # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior - if BIM['NumberOfStories'] <= 2: + if bim['NumberOfStories'] <= 2: bldg_tag = 'S.ERB.L' - elif BIM['NumberOfStories'] <= 5: + elif bim['NumberOfStories'] <= 5: bldg_tag = 'S.ERB.M' else: bldg_tag = 'S.ERB.H' # extend the BIM dictionary - BIM.update( - dict( - RoofCover=roof_cover, - WindowAreaRatio=WWR, - RoofDeckAttachmentM=MRDA, - Shutters=shutters, - WindDebrisClass=WIDD, - ) + bim.update( + { + 'RoofCover': roof_cover, + 'WindowAreaRatio': wwr, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + 'WindDebrisClass': widd, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{roof_cover}." f"{int(shutters)}." - f"{WIDD}." - f"{MRDA}." - f"{WWR}." - f"{int(BIM['TerrainRoughness'])}" + f"{widd}." + f"{mrda}." + f"{wwr}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py index 58b3e2b6a..16c653833 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def SPMB_config(BIM): +def SPMB_config(bim: dict) -> str: """ - Rules to identify a HAZUS SPMB configuration based on BIM data + Rules to identify a HAZUS SPMB configuration based on BIM data. Parameters ---------- @@ -59,21 +58,23 @@ def SPMB_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Roof Deck Age (~ Roof Quality) - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 50): + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 50 + ): roof_quality = 'god' else: roof_quality = 'por' # shutters if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -83,42 +84,43 @@ def SPMB_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer’s instructions. Fasteners are to be applied along + # the manufacturer's instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if BIM['V_ult'] > 142: - MRDA = 'std' # standard + if bim['V_ult'] > 142: + mrda = 'std' # standard else: - MRDA = 'sup' # superior + mrda = 'sup' # superior - if BIM['PlanArea'] <= 4000: + if bim['PlanArea'] <= 4000: bldg_tag = 'S.PMB.S' - elif BIM['PlanArea'] <= 50000: + elif bim['PlanArea'] <= 50000: bldg_tag = 'S.PMB.M' else: bldg_tag = 'S.PMB.L' # extend the BIM dictionary - BIM.update( - dict(RoofQuality=roof_quality, RoofDeckAttachmentM=MRDA, Shutters=shutters) + bim.update( + { + 'RoofQuality': roof_quality, + 'RoofDeckAttachmentM': mrda, + 'Shutters': shutters, + } ) - bldg_config = ( + return ( f"{bldg_tag}." f"{int(shutters)}." f"{roof_quality}." - f"{MRDA}." - f"{int(BIM['TerrainRoughness'])}" + f"{mrda}." + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py index 23c62c341..9de71dc26 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def WMUH_config(BIM): +def WMUH_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS WMUH configuration based on BIM data + Rules to identify a HAZUS WMUH configuration based on BIM data. Parameters ---------- @@ -59,37 +58,35 @@ def WMUH_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. 
- year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) - SWR = 0 # Default + swr: str | int = 0 # Default if year > 2000: - if BIM['RoofShape'] == 'flt': - SWR = 'null' # because SWR is not a question for flat roofs - elif BIM['RoofShape'] in ['gab', 'hip']: - SWR = int(random.random() < 0.6) + if bim['RoofShape'] == 'flt': + swr = 'null' # because SWR is not a question for flat roofs + elif bim['RoofShape'] in {'gab', 'hip'}: + swr = int(random.random() < 0.6) elif year > 1987: - if BIM['RoofShape'] == 'flt': - SWR = 'null' # because SWR is not a question for flat roofs - elif (BIM['RoofShape'] == 'gab') or (BIM['RoofShape'] == 'hip'): - if BIM['RoofSlope'] < 0.33: - SWR = int(True) + if bim['RoofShape'] == 'flt': + swr = 'null' # because SWR is not a question for flat roofs + elif (bim['RoofShape'] == 'gab') or (bim['RoofShape'] == 'hip'): + if bim['RoofSlope'] < 0.33: + swr = int(True) else: - SWR = int(BIM['AvgJanTemp'] == 'below') + swr = int(bim['AvgJanTemp'] == 'below') + elif bim['RoofShape'] == 'flt': + swr = 'null' # because SWR is not a question for flat roofs else: - # year <= 1987 - if BIM['RoofShape'] == 'flt': - SWR = 'null' # because SWR is not a question for flat roofs - else: - SWR = int(random.random() < 0.3) + swr = int(random.random() < 0.3) # Roof cover & Roof quality # Roof cover and quality do not apply to gable and hip roofs - if BIM['RoofShape'] in ['gab', 'hip']: + if bim['RoofShape'] in {'gab', 'hip'}: roof_cover = 'null' roof_quality = 'null' # NJ Building Code Section 1507 (in particular 1507.10 and 1507.12) address @@ -105,26 +102,29 @@ def WMUH_config(BIM): # We assume that all flat roofs built before 1975 are BURs and all roofs # built after 1975 are SPMs. 
# Nothing in NJ Building Code or in the Hazus manual specifies what - # constitutes “good” and “poor” roof conditions, so ruleset is dependant + # constitutes “good” and “poor” roof conditions, so ruleset is dependent # on the age of the roof and average lifespan of BUR and SPM roofs. # We assume that the average lifespan of a BUR roof is 30 years and the # average lifespan of a SPM is 35 years. Therefore, BURs installed before # 1990 are in poor condition, and SPMs installed before 1985 are in poor # condition. + elif year >= 1975: + roof_cover = 'spm' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 35 + ): + roof_quality = 'god' + else: + roof_quality = 'por' else: - if year >= 1975: - roof_cover = 'spm' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): - roof_quality = 'god' - else: - roof_quality = 'por' + # year < 1975 + roof_cover = 'bur' + if bim['YearBuilt'] >= ( + datetime.datetime.now(tz=datetime.timezone.utc).year - 30 + ): + roof_quality = 'god' else: - # year < 1975 - roof_cover = 'bur' - if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): - roof_quality = 'god' - else: - roof_quality = 'por' + roof_quality = 'por' # Roof Deck Attachment (RDA) # IRC 2009-2015: @@ -140,16 +140,15 @@ def WMUH_config(BIM): # The base rule was then extended to the exposures closest to suburban and # light suburban, even though these are not considered by the code. 
if year > 2009: - if BIM['TerrainRoughness'] >= 35: # suburban or light trees - if BIM['V_ult'] > 168.0: - RDA = '8s' # 8d @ 6"/6" 'D' - else: - RDA = '8d' # 8d @ 6"/12" 'B' - else: # light suburban or open - if BIM['V_ult'] > 142.0: - RDA = '8s' # 8d @ 6"/6" 'D' + if bim['TerrainRoughness'] >= 35: # suburban or light trees + if bim['V_ult'] > 168.0: + rda = '8s' # 8d @ 6"/6" 'D' else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' + elif bim['V_ult'] > 142.0: + rda = '8s' # 8d @ 6"/6" 'D' + else: + rda = '8d' # 8d @ 6"/12" 'B' # IRC 2000-2006: # Table 2304.9.1, Line 31 of the 2006 # NJ IBC requires 8d nails (with spacing 6”/12”) for sheathing thicknesses @@ -159,49 +158,47 @@ def WMUH_config(BIM): # change of connector at a certain wind speed. # Thus, all RDAs are assumed to be 8d @ 6”/12”. elif year > 2000: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' # BOCA 1996: # The BOCA 1996 Building Code Requires 8d nails (with spacing 6”/12”) for # roof sheathing thickness up to 1". See Table 2305.2, Section 4. # Attachment requirements are given based on sheathing thickness, basic # wind speed, and the mean roof height of the building. elif year > 1996: - if (BIM['V_ult'] >= 103) and (BIM['MeanRoofHt'] >= 25.0): - RDA = '8s' # 8d @ 6"/6" 'D' + if (bim['V_ult'] >= 103) and (bim['MeanRoofHt'] >= 25.0): + rda = '8s' # 8d @ 6"/6" 'D' else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' # BOCA 1993: # The BOCA 1993 Building Code Requires 8d nails (with spacing 6”/12”) for # sheathing thicknesses of 19/32 inches or greater, and 6d nails (with # spacing 6”/12”) for sheathing thicknesses of ½ inches or less. # See Table 2305.2, Section 4. 
elif year > 1993: - if BIM['SheathingThickness'] <= 0.5: - RDA = '6d' # 6d @ 6"/12" 'A' + if bim['SheathingThickness'] <= 0.5: + rda = '6d' # 6d @ 6"/12" 'A' else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' + elif bim['SheathingThickness'] <= 0.5: + rda = '6d' # 6d @ 6"/12" 'A' else: - # year <= 1993 - if BIM['SheathingThickness'] <= 0.5: - RDA = '6d' # 6d @ 6"/12" 'A' - else: - RDA = '8d' # 8d @ 6"/12" 'B' + rda = '8d' # 8d @ 6"/12" 'B' # Roof-Wall Connection (RWC) # IRC 2000-2015: # 1507.2.8.1 High Wind Attachment. Underlayment applied in areas subject # to high winds (Vasd greater than 110 mph as determined in accordance # with Section 1609.3.1) shall be applied with corrosion-resistant - # fasteners in accordance with the manufacturer’s instructions. Fasteners + # fasteners in accordance with the manufacturer's instructions. Fasteners # are to be applied along the overlap not more than 36 inches on center. # Underlayment installed where Vasd, in accordance with section 1609.3.1 # equals or exceeds 120 mph shall be attached in a grid pattern of 12 # inches between side laps with a 6-inch spacing at the side laps. if year > 2000: - if BIM['V_ult'] > 142.0: - RWC = 'strap' # Strap + if bim['V_ult'] > 142.0: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # BOCA 1996 and earlier: # There is no mention of straps or enhanced tie-downs of any kind in the # BOCA codes, and there is no description of these adoptions in IBHS @@ -214,7 +211,7 @@ def WMUH_config(BIM): # codes, it is assumed that New Jersey did not adopt these standards until # the 2000 IBC. else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: @@ -229,7 +226,7 @@ def WMUH_config(BIM): # are classified as a Group R-3 or R-4 occupancy. # Earlier IRC editions provide similar rules. 
if year >= 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -239,39 +236,36 @@ def WMUH_config(BIM): # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. + elif bim['WindBorneDebris']: + shutters = random.random() < 0.46 else: - if BIM['WindBorneDebris']: - shutters = random.random() < 0.46 - else: - shutters = False + shutters = False # Stories # Buildings with more than 3 stories are mapped to the 3-story configuration - stories = min(BIM['NumberOfStories'], 3) + stories = min(bim['NumberOfStories'], 3) # extend the BIM dictionary - BIM.update( - dict( - SecondaryWaterResistance=SWR, - RoofCover=roof_cover, - RoofQuality=roof_quality, - RoofDeckAttachmentW=RDA, - RoofToWallConnection=RWC, - Shutters=shutters, - ) + bim.update( + { + 'SecondaryWaterResistance': swr, + 'RoofCover': roof_cover, + 'RoofQuality': roof_quality, + 'RoofDeckAttachmentW': rda, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + } ) - bldg_config = ( + return ( f"W.MUH." f"{int(stories)}." - f"{BIM['RoofShape']}." + f"{bim['RoofShape']}." f"{roof_cover}." f"{roof_quality}." - f"{SWR}." - f"{RDA}." - f"{RWC}." + f"{swr}." + f"{rda}." + f"{rwc}." f"{int(shutters)}." 
- f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py index f0dfbab14..c619e326b 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +42,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import random import datetime +import random -def WSF_config(BIM): +def WSF_config(bim: dict) -> str: # noqa: C901 """ - Rules to identify a HAZUS WSF configuration based on BIM data + Rules to identify a HAZUS WSF configuration based on BIM data. Parameters ---------- @@ -59,21 +58,21 @@ def WSF_config(BIM): Returns ------- config: str - A string that identifies a specific configration within this buidling - class. - """ + A string that identifies a specific configuration within this + building class. - year = BIM['YearBuilt'] # just for the sake of brevity + """ + year = bim['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. - SWR = False # Default in Reorganzied Rulesets - WIND + swr = False # Default in Reorganzied Rulesets - WIND if year > 2000: # For buildings built after 2000, SWR is based on homeowner compliance # data from NC Coastal Homeowner Survey (2017) to capture potential # human behavior (% of sealed roofs in NC dataset). 
- SWR = random.random() < 0.6 + swr = random.random() < 0.6 elif year > 1983: # CABO 1995: # According to 903.2 in the 1995 CABO, for roofs with slopes between @@ -92,13 +91,13 @@ def WSF_config(BIM): # Almost all other roof types require underlayment of some sort, but # the ruleset is based on asphalt shingles because it is most # conservative. - if BIM['RoofShape'] == 'flt': # note there is actually no 'flt' - SWR = True - elif BIM['RoofShape'] in ['gab', 'hip']: - if BIM['RoofSlope'] <= 0.17: - SWR = True - elif BIM['RoofSlope'] < 0.33: - SWR = BIM['AvgJanTemp'] == 'below' + if bim['RoofShape'] == 'flt': # note there is actually no 'flt' + swr = True + elif bim['RoofShape'] in {'gab', 'hip'}: + if bim['RoofSlope'] <= 0.17: + swr = True + elif bim['RoofSlope'] < 0.33: + swr = bim['AvgJanTemp'] == 'below' # Roof Deck Attachment (RDA) # IRC codes: @@ -108,7 +107,7 @@ def WSF_config(BIM): # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6” # spacing (enhanced roof spacing) for ultimate wind speeds greater than # a speed_lim. 
speed_lim depends on the year of construction - RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND + rda = '6d' # Default (aka A) in Reorganized Rulesets - WIND if year > 2000: if year >= 2016: # IRC 2015 @@ -116,38 +115,34 @@ def WSF_config(BIM): else: # IRC 2000 - 2009 speed_lim = 100.0 # mph - if BIM['V_ult'] > speed_lim: - RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) + if bim['V_ult'] > speed_lim: + rda = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) else: - RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) elif year > 1995: - if (BIM['SheathingThickness'] >= 0.3125) and ( - BIM['SheathingThickness'] <= 0.5 + if (bim['SheathingThickness'] >= 0.3125) and ( + bim['SheathingThickness'] <= 0.5 ): - RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (BIM['SheathingThickness'] >= 0.59375) and ( - BIM['SheathingThickness'] <= 1.125 + rda = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) + elif (bim['SheathingThickness'] >= 0.59375) and ( + bim['SheathingThickness'] <= 1.125 ): - RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) elif year > 1986: - if (BIM['SheathingThickness'] >= 0.3125) and ( - BIM['SheathingThickness'] <= 0.5 - ): - RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (BIM['SheathingThickness'] >= 0.59375) and ( - BIM['SheathingThickness'] <= 1.0 - ): - RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) - else: - # year <= 1986 - if (BIM['SheathingThickness'] >= 0.3125) and ( - BIM['SheathingThickness'] <= 0.5 + if (bim['SheathingThickness'] >= 0.3125) and ( + bim['SheathingThickness'] <= 0.5 ): - RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (BIM['SheathingThickness'] >= 0.625) and ( - BIM['SheathingThickness'] <= 1.0 + rda = '6d' # 6d @ 6"/12" 
('A' in the Reorganized Rulesets - WIND) + elif (bim['SheathingThickness'] >= 0.59375) and ( + bim['SheathingThickness'] <= 1.0 ): - RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + elif (bim['SheathingThickness'] >= 0.3125) and ( + bim['SheathingThickness'] <= 0.5 + ): + rda = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) + elif (bim['SheathingThickness'] >= 0.625) and (bim['SheathingThickness'] <= 1.0): + rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) # Roof-Wall Connection (RWC) # IRC 2015 @@ -159,10 +154,10 @@ def WSF_config(BIM): # will assume that if classified as HazardProneRegion, then enhanced # connection would be used. if year > 2015: - if BIM['HazardProneRegion']: - RWC = 'strap' # Strap + if bim['HazardProneRegion']: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # IRC 2000-2009 # In Section R802.11.1 Uplift Resistance of the NJ 2009 IRC, roof # assemblies which are subject to wind uplift pressures of 20 pounds per @@ -180,10 +175,10 @@ def WSF_config(BIM): # 110 mph begin to generate pressures of 20 psf in high pressure zones of # the roof. Thus 110 mph is used as the critical velocity. elif year > 1992: - if BIM['V_ult'] > 110: - RWC = 'strap' # Strap + if bim['V_ult'] > 110: + rwc = 'strap' # Strap else: - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # CABO 1989 and earlier # There is no mention of straps or enhanced tie-downs in the CABO codes # older than 1992, and there is no description of these adoptions in IBHS @@ -196,7 +191,7 @@ def WSF_config(BIM): # buildings are toe nails before 1992. else: # year <= 1992 - RWC = 'tnail' # Toe-nail + rwc = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: @@ -209,7 +204,7 @@ def WSF_config(BIM): # and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
if year > 2000: - shutters = BIM['WindBorneDebris'] + shutters = bim['WindBorneDebris'] # CABO: # Based on Human Subjects Data, roughly 45% of houses built in the 1980s # and 1990s had entries that implied they had shutters on at some or all of @@ -219,12 +214,10 @@ def WSF_config(BIM): # 1992 to 1995, 33/74 entries (44.59%) with shutters # 1986 to 1992, 36/79 entries (45.57%) with shutters # 1983 to 1986, 19/44 entries (43.18%) with shutters + elif bim['WindBorneDebris']: + shutters = random.random() < 0.45 else: - # year <= 2000 - if BIM['WindBorneDebris']: - shutters = random.random() < 0.45 - else: - shutters = False + shutters = False # Garage # As per IRC 2015: @@ -241,60 +234,54 @@ def WSF_config(BIM): # WindBorneDebris (and therefore do not have any strength requirements) that # are older than 30 years are considered to be weak, whereas those from the # last 30 years are considered to be standard. - if BIM['Garage'] == -1: + if bim['Garage'] == -1: # no garage data, using the default "standard" garage = 'std' shutters = 0 # HAZUS ties standard garage to w/o shutters - else: - if year > 2000: - if shutters: - if BIM['Garage'] < 1: - garage = 'no' - else: - garage = 'sup' # SFBC 1994 - shutters = 1 # HAZUS ties SFBC 1994 to with shutters + elif year > 2000: + if shutters: + if bim['Garage'] < 1: + garage = 'no' else: - if BIM['Garage'] < 1: - garage = 'no' # None - else: - garage = 'std' # Standard - shutters = 0 # HAZUS ties standard garage to w/o shutters - elif year > (datetime.datetime.now().year - 30): - if BIM['Garage'] < 1: - garage = 'no' # None - else: - garage = 'std' # Standard - shutters = 0 # HAZUS ties standard garage to w/o shutters + garage = 'sup' # SFBC 1994 + shutters = 1 # HAZUS ties SFBC 1994 to with shutters + elif bim['Garage'] < 1: + garage = 'no' # None else: - # year <= current year - 30 - if BIM['Garage'] < 1: - garage = 'no' # None - else: - garage = 'wkd' # Weak - shutters = 0 # HAZUS ties weak garage to w/o shutters + garage = 
'std' # Standard + shutters = 0 # HAZUS ties standard garage to w/o shutters + elif year > (datetime.datetime.now(tz=datetime.timezone.utc).year - 30): + if bim['Garage'] < 1: + garage = 'no' # None + else: + garage = 'std' # Standard + shutters = 0 # HAZUS ties standard garage to w/o shutters + elif bim['Garage'] < 1: + garage = 'no' # None + else: + garage = 'wkd' # Weak + shutters = 0 # HAZUS ties weak garage to w/o shutters # extend the BIM dictionary - BIM.update( - dict( - SecondaryWaterResistance=SWR, - RoofDeckAttachmentW=RDA, - RoofToWallConnection=RWC, - Shutters=shutters, - Garage=garage, - ) + bim.update( + { + 'SecondaryWaterResistance': swr, + 'RoofDeckAttachmentW': rda, + 'RoofToWallConnection': rwc, + 'Shutters': shutters, + 'Garage': garage, + } ) # building configuration tag - bldg_config = ( + return ( f"W.SF." - f"{int(min(BIM['NumberOfStories'],2))}." - f"{BIM['RoofShape']}." - f"{int(SWR)}." - f"{RDA}." - f"{RWC}." + f"{int(min(bim['NumberOfStories'], 2))}." + f"{bim['RoofShape']}." + f"{int(swr)}." + f"{rda}." + f"{rwc}." f"{garage}." f"{int(shutters)}." - f"{int(BIM['TerrainRoughness'])}" + f"{int(bim['TerrainRoughness'])}" ) - - return bldg_config diff --git a/pelicun/tests/maintenance/search_in_functions.py b/pelicun/tests/maintenance/search_in_functions.py index 9dadc9e96..f989ebd7c 100644 --- a/pelicun/tests/maintenance/search_in_functions.py +++ b/pelicun/tests/maintenance/search_in_functions.py @@ -1,8 +1,42 @@ -""" -Code inspection methods/functions. -""" +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + +"""Code inspection methods/functions.""" + +from __future__ import annotations # noqa: I001 +from pathlib import Path -from __future__ import annotations import ast @@ -18,18 +52,18 @@ def visit_FunctionDef( Parameters ---------- - node : ast.FunctionDef + node: ast.FunctionDef The AST node representing the function definition. - filename : str + filename: str The path to the Python file to be searched. - search_string : str + search_string: str The string to search for within the function bodies. - functions_with_string : list[str] + functions_with_string: list[str] The list to append function names that contain the search string. 
""" - with open(filename, 'r', encoding='utf-8') as f: + with Path(filename).open(encoding='utf-8') as f: contents = f.read() function_code = ast.get_source_segment(contents, node) @@ -46,9 +80,9 @@ def find_functions_with_string(filename: str, search_string: str) -> list[str]: Parameters ---------- - filename : str + filename: str The path to the Python file to be searched. - search_string : str + search_string: str The string to search for within the function bodies. Returns @@ -56,8 +90,9 @@ def find_functions_with_string(filename: str, search_string: str) -> list[str]: list[str] A list of function names that contain the search string in their bodies. + """ - with open(filename, 'r', encoding='utf-8') as file: + with Path(filename).open(encoding='utf-8') as file: contents = file.read() tree = ast.parse(contents, filename=filename) diff --git a/pelicun/tests/util.py b/pelicun/tests/util.py index a2c9fb4a6..3505c5321 100644 --- a/pelicun/tests/util.py +++ b/pelicun/tests/util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,26 +32,16 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -# -# Contributors: -# Adam Zsarnóczay -# John Vouvakis Manousakis -""" -These are utility functions for the unit and integration tests. -""" +"""These are utility functions for the unit and integration tests.""" from __future__ import annotations -import pickle -import os -# pylint: disable=useless-suppression -# pylint: disable=unused-variable -# pylint: disable=pointless-statement -# pylint: disable=missing-return-doc,missing-return-type-doc +import pickle # noqa: S403 +from pathlib import Path -def export_pickle(filepath, obj, makedirs=True): +def export_pickle(filepath, obj, makedirs=True) -> None: # noqa: ANN001, FBT002 """ Auxiliary function to export a pickle object. 
@@ -69,20 +58,20 @@ def export_pickle(filepath, obj, makedirs=True): """ # extract the directory name - dirname = os.path.dirname(filepath) + dirname = Path(filepath).parent # if making directories is requested, if makedirs: # and the path does not exist - if not os.path.exists(dirname): + if not Path(dirname).exists(): # create the directory - os.makedirs(dirname) + Path(dirname).mkdir(parents=True) # open the file with the given filepath - with open(filepath, 'wb') as f: + with Path(filepath).open('wb') as f: # and store the object in the file pickle.dump(obj, f) -def import_pickle(filepath): +def import_pickle(filepath): # noqa: ANN001, ANN201 """ Auxiliary function to import a pickle object. @@ -97,6 +86,6 @@ def import_pickle(filepath): """ # open the file with the given filepath - with open(filepath, 'rb') as f: + with Path(filepath).open('rb') as f: # and retrieve the pickled object - return pickle.load(f) + return pickle.load(f) # noqa: S301 diff --git a/pelicun/tests/validation/inactive/3d_interpolation.py b/pelicun/tests/validation/inactive/3d_interpolation.py index 3c365539e..c98a58ccf 100644 --- a/pelicun/tests/validation/inactive/3d_interpolation.py +++ b/pelicun/tests/validation/inactive/3d_interpolation.py @@ -1,7 +1,42 @@ +# noqa: N999 +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. 
Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + """ With this code we verify that scipy's `RegularGridInterpolator` does what we expect. -Created: `Sat Jun 1 03:07:28 PM PDT 2024` +Created: `Sat Jun 1 03:07:28 PM PDT 2024`. """ @@ -32,13 +67,12 @@ interpolated_value = interp_func(test_values) # Compare output with the exact value. -df = pd.DataFrame( +data = pd.DataFrame( { 'exact': x1 + np.sqrt(x2) + np.sin(x3), 'interpolated': interpolated_value, } ) -print(df) # Note: This does work with a 2D case, and it could scale to more than # 3 dimensions. 
diff --git a/pelicun/tests/validation/inactive/pandas_convert_speed.py b/pelicun/tests/validation/inactive/pandas_convert_speed.py index 8f153ccb1..7f2493556 100644 --- a/pelicun/tests/validation/inactive/pandas_convert_speed.py +++ b/pelicun/tests/validation/inactive/pandas_convert_speed.py @@ -1,55 +1,77 @@ -import pandas as pd -import numpy as np +# Copyright (c) 2018 Leland Stanford Junior University +# Copyright (c) 2018 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . + import time -# pylint: disable=pointless-statement +import numpy as np +import pandas as pd -def benchmark(): +def benchmark() -> None: # Create a large DataFrame - df = pd.DataFrame(np.random.rand(1000000, 10), columns=list('ABCDEFGHIJ')) + data = pd.DataFrame(np.random.rand(1000000, 10), columns=list('ABCDEFGHIJ')) # Measure time for df.to_dict(orient='list') - start_time = time.time() - df.to_dict(orient='list') - end_time = time.time() - print(f'Time taken with to_dict(orient="list"): {end_time - start_time} seconds') + time.time() + data.to_dict(orient='list') + time.time() # Measure time for dictionary comprehension - start_time = time.time() - {col: df[col].tolist() for col in df.columns} - end_time = time.time() - print( - f'Time taken with dictionary comprehension: {end_time - start_time} seconds' - ) + time.time() + {col: data[col].tolist() for col in data.columns} + time.time() # Measure time for dictionary comprehension without to list - start_time = time.time() - {col: df[col] for col in df.columns} - end_time = time.time() - print( - f'Time taken with dictionary comprehension ' - f'without to_list: {end_time - start_time} seconds' - ) + time.time() + {col: data[col] for col in data.columns} + time.time() # Measure time for .values - start_time = time.time() - df.values - end_time = time.time() - print(f'Time taken with 
.values: {end_time - start_time} seconds') + time.time() + data.to_numpy() + time.time() # Measure time for using df.to_numpy() - start_time = time.time() - data_array = df.to_numpy() - {col: data_array[:, i].tolist() for i, col in enumerate(df.columns)} - end_time = time.time() - print(f'Time taken with df.to_numpy(): {end_time - start_time} seconds') + time.time() + data_array = data.to_numpy() + {col: data_array[:, i].tolist() for i, col in enumerate(data.columns)} + time.time() # Measure time for using df.to_dict() - start_time = time.time() - df.to_dict() - end_time = time.time() - print(f'Time taken with df.to_dict(): {end_time - start_time} seconds') + time.time() + data.to_dict() + time.time() if __name__ == '__main__': diff --git a/pelicun/tests/validation/inactive/readme.md b/pelicun/tests/validation/inactive/readme.md index d3d8900ec..ceb464bca 100644 --- a/pelicun/tests/validation/inactive/readme.md +++ b/pelicun/tests/validation/inactive/readme.md @@ -1,3 +1,3 @@ This directory contains code that is not meant to be tested or -updated, but was used to verify outputs of vairous external libraries +updated, but was used to verify outputs of various external libraries we utilize and ensure they are in line with our expectations. 
diff --git a/pelicun/tests/validation/v2/test_validation_2.py b/pelicun/tests/validation/v2/test_validation_2.py index 1c6e0e9ec..111113f7d 100644 --- a/pelicun/tests/validation/v2/test_validation_2.py +++ b/pelicun/tests/validation/v2/test_validation_2.py @@ -47,7 +47,7 @@ import pelicun from pelicun import assessment, file_io -from pelicun.warnings import PelicunWarning +from pelicun.pelicun_warnings import PelicunWarning def test_combined_workflow() -> None: @@ -132,7 +132,7 @@ def add_more_edps() -> None: ) demand_sample_ext = pd.concat([demand_sample, rid], axis=1) # type: ignore - demand_sample_ext[('SA_1.13', 0, 1)] = 1.50 + demand_sample_ext['SA_1.13', 0, 1] = 1.50 # Add units to the data demand_sample_ext.T.insert(0, 'Units', '') diff --git a/pelicun/tools/DL_calculation.py b/pelicun/tools/DL_calculation.py index d430ef1b9..bc0da31fe 100644 --- a/pelicun/tools/DL_calculation.py +++ b/pelicun/tools/DL_calculation.py @@ -1,5 +1,4 @@ -# -*- coding: utf-8 -*- -# +# # noqa: N999 # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -38,51 +37,45 @@ # Adam Zsarnóczay # John Vouvakis Manousakis -""" -This module provides the main functionality to run a pelicun -calculation from the command line. 
- -""" +"""Main functionality to run a pelicun calculation from the command line.""" from __future__ import annotations -from time import gmtime -from time import strftime -import sys -import os -import json + import argparse +import json +import os +import sys from pathlib import Path - -import numpy as np -import pandas as pd +from time import gmtime, strftime +from typing import Hashable import colorama -from colorama import Fore -from colorama import Style - import jsonschema +import numpy as np +import pandas as pd +from colorama import Fore, Style from jsonschema import validate -import pelicun -from pelicun.auto import auto_populate -from pelicun.base import str2bool -from pelicun.base import convert_to_MultiIndex -from pelicun.base import convert_to_SimpleIndex -from pelicun.base import describe -from pelicun.base import get -from pelicun.base import update -from pelicun.base import is_specified -from pelicun.base import is_unspecified from pelicun import base from pelicun.assessment import DLCalculationAssessment -from pelicun.warnings import PelicunInvalidConfigError - +from pelicun.auto import auto_populate +from pelicun.base import ( + convert_to_MultiIndex, + convert_to_SimpleIndex, + describe, + get, + is_specified, + is_unspecified, + str2bool, + update, +) +from pelicun.pelicun_warnings import PelicunInvalidConfigError colorama.init() -sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) +sys.path.insert(0, Path(__file__).resolve().parent.absolute().as_posix()) -def log_msg(msg, color_codes=None): +def log_msg(msg: str, color_codes: tuple[str, str] | None = None) -> None: """ Print a formatted log message with a timestamp. 
@@ -96,16 +89,14 @@ def log_msg(msg, color_codes=None): """ if color_codes: cpref, csuff = color_codes - formatted_msg = ( + ( f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} ' f'{cpref}' f'{msg}' f'{csuff}' ) else: - formatted_msg = f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} {msg}' - - print(formatted_msg) + f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} {msg}' # list of output files help perform safe initialization of output dir @@ -184,13 +175,13 @@ def log_msg(msg, color_codes=None): } -def convert_df_to_dict(df, axis=1): +def convert_df_to_dict(data: pd.DataFrame | pd.Series, axis: int = 1) -> dict: """ Convert a pandas DataFrame to a dictionary. Parameters ---------- - df : pd.DataFrame + data : pd.DataFrame The DataFrame to be converted. axis : int, optional The axis to consider for the conversion. @@ -220,31 +211,31 @@ def convert_df_to_dict(df, axis=1): as values. """ - - out_dict = {} + out_dict: dict[Hashable, object] = {} if axis == 1: - df_in = df + df_in = data elif axis == 0: - df_in = df.T + df_in = data.T else: - raise ValueError('`axis` must be `0` or `1`') + msg = '`axis` must be `0` or `1`' + raise ValueError(msg) - MI = df_in.columns + multiindex = df_in.columns - for label in MI.unique(level=0): + for label in multiindex.unique(level=0): out_dict.update({label: np.nan}) sub_df = df_in[label] skip_sub = True - if MI.nlevels > 1: + if multiindex.nlevels > 1: skip_sub = False - if isinstance(sub_df, pd.Series): - skip_sub = True - elif (len(sub_df.columns) == 1) and (sub_df.columns[0] == ''): + if isinstance(sub_df, pd.Series) or ( + (len(sub_df.columns) == 1) and (sub_df.columns[0] == '') # noqa: PLC1901 + ): skip_sub = True if not skip_sub: @@ -261,17 +252,17 @@ def convert_df_to_dict(df, axis=1): def run_pelicun( - config_path, - demand_file, - output_path, - realizations, - detailed_results, - coupled_EDP, - auto_script_path, - custom_model_dir, - color_warnings, - output_format, -): + config_path: str, + demand_file: str, + output_path: str | 
None, + realizations: int, + auto_script_path: str | None, + custom_model_dir: str | None, + output_format: list | None, + *, + detailed_results: bool, + coupled_edp: bool, +) -> None: """ Use settings in the config JSON to prepare and run a Pelicun calculation. @@ -283,67 +274,57 @@ def run_pelicun( Path pointing to the location of a CSV file with the demand data. output_path: string, optional Path pointing to the location where results shall be saved. - coupled_EDP: bool, optional - If True, EDPs are not resampled and processed in order. realizations: int, optional Number of realizations to generate. auto_script_path: string, optional Path pointing to the location of a Python script with an auto_populate method that automatically creates the performance model using data provided in the AIM JSON file. - detailed_results: bool, optional - If False, only the main statistics are saved. - output_format: str - Type of output format, JSON or CSV. custom_model_dir: string, optional Path pointing to a directory with files that define user-provided model parameters for a customized damage and loss assessment. - color_warnings: bool, optional - If True, warnings are printed in red on the console. If output - is redirected to a file, it will contain ANSI codes. When - viewed on the console with `cat`, `less`, or similar utilities, - the color will be shown. - - Raises - ------ - PelicunInvalidConfigError - When the config file is invalid or contains missing entries. + output_format: list, optional. + Type of output format, JSON or CSV. + Valid options: ['csv', 'json'], ['csv'], ['json'], [], None + detailed_results: bool, optional + If False, only the main statistics are saved. + coupled_edp: bool, optional + If True, EDPs are not resampled and processed in order. 
""" - log_msg('First line of DL_calculation') # Initial setup ----------------------------------------------------------- # get the absolute path to the config file - config_path = Path(config_path).resolve() + config_path_p = Path(config_path).resolve() # If the output path was not specified, results are saved in the # directory of the input file. if output_path is None: - output_path = config_path.parents[0] + output_path_p = config_path_p.parents[0] else: - output_path = Path(output_path) + output_path_p = Path(output_path).resolve() # create the directory if it does not exist - if not os.path.exists(output_path): - os.makedirs(output_path, exist_ok=True) + if not output_path_p.exists(): + output_path_p.mkdir(parents=True) # parse the config file config = _parse_config_file( - config_path, - output_path, - auto_script_path, + config_path_p, + output_path_p, + Path(auto_script_path).resolve() if auto_script_path is not None else None, demand_file, realizations, - coupled_EDP, - detailed_results, output_format, + coupled_edp=coupled_edp, + detailed_results=detailed_results, ) - # Initialize the array that we'll use to collect the output file names - out_files = [] + # List to keep track of the generated output files. 
+ out_files: list[str] = [] - _remove_existing_files(output_path, known_output_files) + _remove_existing_files(output_path_p, known_output_files) # Run the assessment assessment = DLCalculationAssessment(config_options=get(config, 'DL/Options')) @@ -354,11 +335,11 @@ def run_pelicun( length_unit=get(config, 'GeneralInformation/units/length', default=None), demand_calibration=get(config, 'DL/Demands/Calibration', default=None), sample_size=get(config, 'DL/Options/Sampling/SampleSize'), - coupled_demands=get(config, 'DL/Demands/CoupledDemands', default=False), demand_cloning=get(config, 'DL/Demands/DemandCloning', default=None), residual_drift_inference=get( config, 'DL/Demands/InferResidualDrift', default=None ), + coupled_demands=get(config, 'DL/Demands/CoupledDemands', default=False), ) if is_specified(config, 'DL/Asset'): @@ -370,12 +351,12 @@ def run_pelicun( collapse_fragility_demand_type=get( config, 'DL/Damage/CollapseFragility/DemandType', default=None ), - add_irreparable_damage_columns=get( - config, 'DL/Damage/IrreparableDamage', default=False - ), component_sample_file=get( config, 'DL/Asset/ComponentSampleFile', default=None ), + add_irreparable_damage_columns=get( + config, 'DL/Damage/IrreparableDamage', default=False + ), ) if is_specified(config, 'DL/Damage'): @@ -388,9 +369,6 @@ def run_pelicun( collapse_fragility=get( config, 'DL/Damage/CollapseFragility', default=None ), - is_for_water_network_assessment=is_specified( - config, 'DL/Asset/ComponentDatabase/Water' - ), irreparable_damage=get( config, 'DL/Damage/IrreparableDamage', default=None ), @@ -401,6 +379,9 @@ def run_pelicun( config, 'DL/Damage/DamageProcessFilePath', default=None ), custom_model_dir=custom_model_dir, + is_for_water_network_assessment=is_specified( + config, 'DL/Asset/ComponentDatabase/Water' + ), ) if is_unspecified(config, 'DL/Losses/Repair'): @@ -443,14 +424,14 @@ def run_pelicun( if is_specified(config, 'DL/Outputs/Demand'): output_config = get(config, 
'DL/Outputs/Demand') - _demand_save(output_config, assessment, output_path, out_files) + _demand_save(output_config, assessment, output_path_p, out_files) if is_specified(config, 'DL/Outputs/Asset'): output_config = get(config, 'DL/Outputs/Asset') _asset_save( output_config, assessment, - output_path, + output_path_p, out_files, aggregate_colocated=get( config, @@ -464,7 +445,7 @@ def run_pelicun( _damage_save( output_config, assessment, - output_path, + output_path_p, out_files, aggregate_colocated=get( config, @@ -480,10 +461,11 @@ def run_pelicun( if is_specified(config, 'DL/Outputs/Loss/Repair'): output_config = get(config, 'DL/Outputs/Loss/Repair') + assert agg_repair is not None _loss_save( output_config, assessment, - output_path, + output_path_p, out_files, agg_repair, aggregate_colocated=get( @@ -492,12 +474,12 @@ def run_pelicun( default=False, ), ) - _summary_save(summary, summary_stats, output_path, out_files) - _create_json_files_if_requested(config, out_files, output_path) - _remove_csv_files_if_not_requested(config, out_files, output_path) + _summary_save(summary, summary_stats, output_path_p, out_files) + _create_json_files_if_requested(config, out_files, output_path_p) + _remove_csv_files_if_not_requested(config, out_files, output_path_p) -def _parse_decision_variables(config): +def _parse_decision_variables(config: dict) -> tuple[str, ...]: """ Parse decision variables from the config file. @@ -512,17 +494,19 @@ def _parse_decision_variables(config): List of decision variables. 
""" - decision_variables = [] + decision_variables: list[str] = [] if get(config, 'DL/Losses/Repair/DecisionVariables', default=False) is not False: - for DV_i, DV_status in get( + for dv_i, dv_status in get( config, 'DL/Losses/Repair/DecisionVariables' ).items(): - if DV_status is True: - decision_variables.append(DV_i) - return decision_variables + if dv_status is True: + decision_variables.append(dv_i) + return tuple(decision_variables) -def _remove_csv_files_if_not_requested(config, out_files, output_path): +def _remove_csv_files_if_not_requested( + config: dict, out_files: list[str], output_path: Path +) -> None: """ Remove CSV files if not requested in config. @@ -543,10 +527,15 @@ def _remove_csv_files_if_not_requested(config, out_files, output_path): # keep the DL_summary and DL_summary_stats files if 'DL_summary' in filename: continue - os.remove(output_path / filename) + Path(output_path / filename).unlink() -def _summary_save(summary, summary_stats, output_path, out_files): +def _summary_save( + summary: pd.DataFrame, + summary_stats: pd.DataFrame, + output_path: Path, + out_files: list[str], +) -> None: """ Save summary results to CSV files. @@ -573,16 +562,17 @@ def _summary_save(summary, summary_stats, output_path, out_files): out_files.append('DL_summary_stats.csv') -def _parse_config_file( - config_path, - output_path, - auto_script_path, - demand_file, - realizations, - coupled_EDP, - detailed_results, - output_format, -): +def _parse_config_file( # noqa: C901 + config_path: Path, + output_path: Path, + auto_script_path: Path | None, + demand_file: str, + realizations: int, + output_format: list | None, + *, + coupled_edp: bool, + detailed_results: bool, +) -> dict[str, object]: """ Parse and validate the config file for Pelicun. @@ -610,14 +600,20 @@ def _parse_config_file( dict Parsed and validated configuration. 
+ Raises + ------ + PelicunInvalidConfigError + If the provided config file does not conform to the schema or + there are issues with the specified values. + """ # open the config file and parse it - with open(config_path, 'r', encoding='utf-8') as f: + with Path(config_path).open(encoding='utf-8') as f: config = json.load(f) # load the schema - with open( - f'{base.pelicun_path}/settings/input_schema.json', 'r', encoding='utf-8' + with Path(f'{base.pelicun_path}/settings/input_schema.json').open( + encoding='utf-8' ) as f: schema = json.load(f) @@ -625,33 +621,34 @@ def _parse_config_file( try: validate(instance=config, schema=schema) except jsonschema.exceptions.ValidationError as exc: - raise PelicunInvalidConfigError( - 'The provided config file does not conform to the schema.' - ) from exc + msg = 'The provided config file does not conform to the schema.' + raise PelicunInvalidConfigError(msg) from exc if is_unspecified(config, 'DL'): log_msg('Damage and Loss configuration missing from config file. ') if auto_script_path is None: - raise PelicunInvalidConfigError('No `DL` entry in config file.') + msg = 'No `DL` entry in config file.' + raise PelicunInvalidConfigError(msg) log_msg('Trying to auto-populate') - config_ap, CMP = auto_populate(config, auto_script_path) + config_ap, comp = auto_populate(config, auto_script_path) if is_unspecified(config_ap, 'DL'): - raise PelicunInvalidConfigError( + msg = ( 'No `DL` entry in config file, and ' 'the prescribed auto-population script failed to identify ' 'a valid damage and loss configuration for this asset. 
' ) + raise PelicunInvalidConfigError(msg) # add the demand information update(config_ap, '/DL/Demands/DemandFilePath', demand_file) update(config_ap, '/DL/Demands/SampleSize', str(realizations)) - if coupled_EDP is True: - update(config_ap, 'DL/Demands/CoupledDemands', True) + if coupled_edp is True: + update(config_ap, 'DL/Demands/CoupledDemands', value=True) else: update( @@ -661,7 +658,7 @@ def _parse_config_file( ) # save the component data - CMP.to_csv(output_path / 'CMP_QNT.csv') + comp.to_csv(output_path / 'CMP_QNT.csv') # update the config file with the location update( @@ -686,7 +683,7 @@ def _parse_config_file( # save the extended config to a file config_ap_path = Path(config_path.stem + '_ap.json').resolve() - with open(config_ap_path, 'w', encoding='utf-8') as f: + with Path(config_ap_path).open('w', encoding='utf-8') as f: json.dump(config_ap, f, indent=2) update(config, 'DL', get(config_ap, 'DL')) @@ -696,9 +693,8 @@ def _parse_config_file( if not sample_size_str: sample_size_str = get(config, 'DL/Demands/SampleSize') if not sample_size_str: - raise PelicunInvalidConfigError( - 'Sample size not provided in config file.' - ) + msg = 'Sample size not provided in config file.' + raise PelicunInvalidConfigError(msg) update(config, 'DL/Options/Sampling/SampleSize', int(sample_size_str)) # provide all outputs if the files are not specified @@ -726,10 +722,12 @@ def _parse_config_file( update(config, 'DL/Outputs/Settings', pbe_settings) if is_unspecified(config, 'DL/Demands'): - raise PelicunInvalidConfigError('Demand configuration missing.') + msg = 'Demand configuration missing.' + raise PelicunInvalidConfigError(msg) if is_unspecified(config, 'DL/Asset'): - raise PelicunInvalidConfigError('Asset configuration missing.') + msg = 'Asset configuration missing.' 
+ raise PelicunInvalidConfigError(msg) update( config, @@ -740,7 +738,7 @@ def _parse_config_file( update( config, 'DL/Options/Verbose', - True, + value=True, only_if_empty_or_none=True, ) @@ -748,7 +746,10 @@ def _parse_config_file( # then use True as default for DL_calculations regardless of what # the Pelicun default is. update( - config, 'DL/Options/ListAllDamageStates', True, only_if_empty_or_none=True + config, + 'DL/Options/ListAllDamageStates', + value=True, + only_if_empty_or_none=True, ) # if the demand file location is not specified in the config file @@ -784,63 +785,68 @@ def _parse_config_file( if is_specified(config, 'DL/Demands/InferResidualDrift') and is_unspecified( config, 'DL/Demands/InferResidualDrift/method' ): - raise PelicunInvalidConfigError( - 'No method is specified in residual drift inference configuration.' - ) + msg = 'No method is specified in residual drift inference configuration.' + raise PelicunInvalidConfigError(msg) # Ensure `DL/Damage/CollapseFragility` contains all required keys. if is_specified(config, 'DL/Damage/CollapseFragility'): for thing in ('CapacityDistribution', 'CapacityMedian', 'Theta_1'): if is_unspecified(config, f'DL/Damage/CollapseFragility/{thing}'): - raise PelicunInvalidConfigError( + msg = ( f'`{thing}` is missing from DL/Damage/CollapseFragility' f' in the configuration file.' ) + raise PelicunInvalidConfigError(msg) # Ensure `DL/Damage/IrreparableDamage` contains all required keys. if is_specified(config, 'DL/Damage/IrreparableDamage'): for thing in ('DriftCapacityMedian', 'DriftCapacityLogStd'): if is_unspecified(config, f'DL/Damage/IrreparableDamage/{thing}'): - raise PelicunInvalidConfigError( + msg = ( f'`{thing}` is missing from DL/Damage/IrreparableDamage' f' in the configuration file.' ) + raise PelicunInvalidConfigError(msg) # If the damage process approach is `User Defined` there needs to # be a damage process file path. 
if get(config, 'DL/Damage/DamageProcess') == 'User Defined' and is_unspecified( config, 'DL/Damage/DamageProcessFilePath' ): - raise PelicunInvalidConfigError( + msg = ( 'When `DL/Damage/DamageProcess` is set to `User Defined`, ' 'a path needs to be specified under ' '`DL/Damage/DamageProcessFilePath`.' ) + raise PelicunInvalidConfigError(msg) # Getting results requires running the calculations. if is_specified(config, 'DL/Outputs/Asset') and is_unspecified( config, 'DL/Asset' ): - raise PelicunInvalidConfigError( + msg = ( 'No asset data specified in config file. ' 'Cannot generate asset model outputs.' ) + raise PelicunInvalidConfigError(msg) if is_specified(config, 'DL/Outputs/Damage') and is_unspecified( config, 'DL/Damage' ): - raise PelicunInvalidConfigError( + msg = ( 'No damage data specified in config file. ' 'Cannot generate damage model outputs.' ) + raise PelicunInvalidConfigError(msg) if is_specified(config, 'DL/Outputs/Loss') and is_unspecified( config, 'DL/Losses' ): - raise PelicunInvalidConfigError( + msg = ( 'No loss data specified in config file. ' 'Cannot generate loss model outputs.' ) + raise PelicunInvalidConfigError(msg) # Ensure only one of `component_assignment_file` or # `component_sample_file` is provided. @@ -848,22 +854,23 @@ def _parse_config_file( if ( (get(config, 'DL/Asset/ComponentAssignmentFile') is None) and (get(config, 'DL/Asset/ComponentSampleFile') is None) - or ( - (get(config, 'DL/Asset/ComponentAssignmentFile') is not None) - and (get(config, 'DL/Asset/ComponentSampleFile') is not None) - ) + ) or ( + (get(config, 'DL/Asset/ComponentAssignmentFile') is not None) + and (get(config, 'DL/Asset/ComponentSampleFile') is not None) ): msg = ( - 'In the asset model configuraiton, it is ' + 'In the asset model configuration, it is ' 'required to specify one of `component_assignment_file` ' 'or `component_sample_file`, but not both.' 
) - raise ValueError(msg) + raise PelicunInvalidConfigError(msg) return config -def _create_json_files_if_requested(config, out_files, output_path): +def _create_json_files_if_requested( + config: dict, out_files: list[str], output_path: Path +) -> None: """ Create JSON files if requested in the config. @@ -888,20 +895,21 @@ def _create_json_files_if_requested(config, out_files, output_path): get(config, 'DL/Outputs/Settings/SimpleIndexInJSON', default=False) is True ): - df = pd.read_csv(output_path / filename, index_col=0) + data = pd.read_csv(output_path / filename, index_col=0) else: - df = convert_to_MultiIndex( + data = convert_to_MultiIndex( pd.read_csv(output_path / filename, index_col=0), axis=1 ) - if 'Units' in df.index: + if 'Units' in data.index: df_units = convert_to_SimpleIndex( - df.loc['Units', :].to_frame().T, axis=1 + data.loc['Units', :].to_frame().T, # type: ignore + axis=1, ) - df.drop('Units', axis=0, inplace=True) + data = data.drop('Units', axis=0) - out_dict = convert_df_to_dict(df) + out_dict = convert_df_to_dict(data) out_dict.update( { @@ -912,13 +920,15 @@ def _create_json_files_if_requested(config, out_files, output_path): ) else: - out_dict = convert_df_to_dict(df) + out_dict = convert_df_to_dict(data) - with open(output_path / filename_json, 'w', encoding='utf-8') as f: + with Path(output_path / filename_json).open('w', encoding='utf-8') as f: json.dump(out_dict, f, indent=2) -def _result_summary(assessment, agg_repair): +def _result_summary( + assessment: DLCalculationAssessment, agg_repair: pd.DataFrame | None +) -> tuple[pd.DataFrame, pd.DataFrame]: """ Generate a summary of the results. 
@@ -937,9 +947,11 @@ def _result_summary(assessment, agg_repair): """ damage_sample = assessment.damage.save_sample() if damage_sample is None or agg_repair is None: - return None, None + return pd.DataFrame(), pd.DataFrame() - damage_sample = damage_sample.groupby(level=['cmp', 'ds'], axis=1).sum() + assert isinstance(damage_sample, pd.DataFrame) + damage_sample = damage_sample.groupby(level=['cmp', 'ds'], axis=1).sum() # type: ignore + assert isinstance(damage_sample, pd.DataFrame) damage_sample_s = convert_to_SimpleIndex(damage_sample, axis=1) if 'collapse-1' in damage_sample_s.columns: @@ -967,7 +979,7 @@ def _result_summary(assessment, agg_repair): return summary, summary_stats -def _parse_requested_output_file_names(output_config): +def _parse_requested_output_file_names(output_config: dict) -> set[str]: """ Parse the output file names from the output configuration. @@ -989,7 +1001,12 @@ def _parse_requested_output_file_names(output_config): return set(out_reqs) -def _demand_save(output_config, assessment, output_path, out_files): +def _demand_save( + output_config: dict, + assessment: DLCalculationAssessment, + output_path: Path, + out_files: list[str], +) -> None: """ Save demand results to files based on the output config. 
@@ -1007,8 +1024,12 @@ def _demand_save(output_config, assessment, output_path, out_files): """ out_reqs = _parse_requested_output_file_names(output_config) - demand_sample, demand_units = assessment.demand.save_sample(save_units=True) - demand_units = demand_units.to_frame().T + demand_sample, demand_units_series = assessment.demand.save_sample( + save_units=True + ) + assert isinstance(demand_sample, pd.DataFrame) + assert isinstance(demand_units_series, pd.Series) + demand_units = demand_units_series.to_frame().T if 'Sample' in out_reqs: demand_sample_s = pd.concat([demand_sample, demand_units]) @@ -1032,8 +1053,13 @@ def _demand_save(output_config, assessment, output_path, out_files): def _asset_save( - output_config, assessment, output_path, out_files, aggregate_colocated=False -): + output_config: dict, + assessment: DLCalculationAssessment, + output_path: Path, + out_files: list[str], + *, + aggregate_colocated: bool = False, +) -> None: """ Save asset results to files based on the output config. @@ -1051,12 +1077,14 @@ def _asset_save( Whether to aggregate colocated components. Default is False. 
""" - cmp_sample, cmp_units = assessment.asset.save_cmp_sample(save_units=True) - cmp_units = cmp_units.to_frame().T + output = assessment.asset.save_cmp_sample(save_units=True) + assert isinstance(output, tuple) + cmp_sample, cmp_units = output + cmp_units = cmp_units.to_frame().T # type: ignore if aggregate_colocated: - cmp_units = cmp_units.groupby(level=['cmp', 'loc', 'dir'], axis=1).first() - cmp_groupby_uid = cmp_sample.groupby(level=['cmp', 'loc', 'dir'], axis=1) + cmp_units = cmp_units.groupby(level=['cmp', 'loc', 'dir'], axis=1).first() # type: ignore + cmp_groupby_uid = cmp_sample.groupby(level=['cmp', 'loc', 'dir'], axis=1) # type: ignore cmp_sample = cmp_groupby_uid.sum().mask(cmp_groupby_uid.count() == 0, np.nan) out_reqs = _parse_requested_output_file_names(output_config) @@ -1084,13 +1112,14 @@ def _asset_save( def _damage_save( - output_config, - assessment, - output_path, - out_files, - aggregate_colocated=False, - condense_ds=False, -): + output_config: dict, + assessment: DLCalculationAssessment, + output_path: Path, + out_files: list[str], + *, + aggregate_colocated: bool = False, + condense_ds: bool = False, +) -> None: """ Save damage results to files based on the output config. @@ -1110,14 +1139,16 @@ def _damage_save( Whether to condense damage states. Default is False. 
""" - damage_sample, damage_units = assessment.damage.save_sample(save_units=True) - damage_units = damage_units.to_frame().T + output = assessment.damage.save_sample(save_units=True) + assert isinstance(output, tuple) + damage_sample, damage_units_series = output + damage_units = damage_units_series.to_frame().T if aggregate_colocated: - damage_units = damage_units.groupby( + damage_units = damage_units.groupby( # type: ignore level=['cmp', 'loc', 'dir', 'ds'], axis=1 ).first() - damage_groupby_uid = damage_sample.groupby( + damage_groupby_uid = damage_sample.groupby( # type: ignore level=['cmp', 'loc', 'dir', 'ds'], axis=1 ) damage_sample = damage_groupby_uid.sum().mask( @@ -1153,13 +1184,13 @@ def _damage_save( if out_reqs.intersection({'GroupedSample', 'GroupedStatistics'}): if aggregate_colocated: - damage_groupby = damage_sample.groupby(level=['cmp', 'ds'], axis=1) - damage_units = damage_units.groupby(level=['cmp', 'ds'], axis=1).first() + damage_groupby = damage_sample.groupby(level=['cmp', 'ds'], axis=1) # type: ignore + damage_units = damage_units.groupby(level=['cmp', 'ds'], axis=1).first() # type: ignore else: - damage_groupby = damage_sample.groupby( + damage_groupby = damage_sample.groupby( # type: ignore level=['cmp', 'loc', 'dir', 'ds'], axis=1 ) - damage_units = damage_units.groupby( + damage_units = damage_units.groupby( # type: ignore level=['cmp', 'loc', 'dir', 'ds'], axis=1 ).first() @@ -1168,7 +1199,9 @@ def _damage_save( # if requested, condense DS output if condense_ds: # replace non-zero values with 1 - grp_damage = grp_damage.mask(grp_damage.astype(np.float64).values > 0, 1) + grp_damage = grp_damage.mask( + grp_damage.astype(np.float64).to_numpy() > 0, 1 + ) # get the corresponding DS for each column ds_list = grp_damage.columns.get_level_values('ds').astype(int) @@ -1187,7 +1220,7 @@ def _damage_save( # aggregate units to the same format # assume identical units across locations for each comp - damage_units = 
damage_units.groupby(level=['cmp', 'ds'], axis=1).first() + damage_units = damage_units.groupby(level=['cmp', 'ds'], axis=1).first() # type: ignore else: # otherwise, aggregate damage quantities for each comp @@ -1199,7 +1232,7 @@ def _damage_save( ) # and aggregate units to the same format - damage_units = damage_units.groupby(level='cmp', axis=1).first() + damage_units = damage_units.groupby(level='cmp', axis=1).first() # type: ignore if 'GroupedSample' in out_reqs: grp_damage_s = pd.concat([grp_damage, damage_units]) @@ -1228,13 +1261,14 @@ def _damage_save( def _loss_save( - output_config, - assessment, - output_path, - out_files, - agg_repair, - aggregate_colocated=False, -): + output_config: dict, + assessment: DLCalculationAssessment, + output_path: Path, + out_files: list[str], + agg_repair: pd.DataFrame, + *, + aggregate_colocated: bool = False, +) -> None: """ Save loss results to files based on the output config. @@ -1254,16 +1288,16 @@ def _loss_save( Whether to aggregate colocated components. Default is False. 
""" - repair_sample, repair_units = assessment.loss.ds_model.save_sample( - save_units=True - ) - repair_units = repair_units.to_frame().T + out = assessment.loss.ds_model.save_sample(save_units=True) + assert isinstance(out, tuple) + repair_sample, repair_units_series = out + repair_units = repair_units_series.to_frame().T if aggregate_colocated: - repair_units = repair_units.groupby( + repair_units = repair_units.groupby( # type: ignore level=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir'], axis=1 ).first() - repair_groupby_uid = repair_sample.groupby( + repair_groupby_uid = repair_sample.groupby( # type: ignore level=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir'], axis=1 ) repair_sample = repair_groupby_uid.sum().mask( @@ -1299,8 +1333,8 @@ def _loss_save( out_files.append('DV_repair_stats.csv') if out_reqs.intersection({'GroupedSample', 'GroupedStatistics'}): - repair_groupby = repair_sample.groupby(level=['dv', 'loss', 'dmg'], axis=1) - repair_units = repair_units.groupby( + repair_groupby = repair_sample.groupby(level=['dv', 'loss', 'dmg'], axis=1) # type: ignore + repair_units = repair_units.groupby( # type: ignore level=['dv', 'loss', 'dmg'], axis=1 ).first() grp_repair = repair_groupby.sum().mask(repair_groupby.count() == 0, np.nan) @@ -1352,31 +1386,7 @@ def _loss_save( out_files.append('DV_repair_agg_stats.csv') -def _get_color_codes(color_warnings): - """ - Get color codes for formatting warnings. - - Parameters - ---------- - color_warnings : bool - Whether to enable colored warnings. - - Returns - ------- - tuple - Color codes for prefix and suffix. - - """ - if color_warnings: - cpref = Fore.RED - csuff = Style.RESET_ALL - else: - cpref = csuff = '' - - return (cpref, csuff) - - -def _remove_existing_files(output_path, known_output_files): +def _remove_existing_files(output_path: Path, known_output_files: list[str]) -> None: """ Remove known existing files from the specified output path. 
@@ -1405,20 +1415,18 @@ def _remove_existing_files(output_path, known_output_files): for filename in files: if filename in known_output_files: try: - os.remove(output_path / filename) + (output_path / filename).unlink() except OSError as exc: - raise OSError( + msg = ( f'Error occurred while removing ' f'`{output_path / filename}`: {exc}' - ) from exc - + ) + raise OSError(msg) from exc -def main(): - """ - Main method to parse arguments and run the pelicun calculation. - """ - args = sys.argv[1:] +def main() -> None: + """Parse arguments and run the pelicun calculation.""" + args_list = sys.argv[1:] parser = argparse.ArgumentParser() parser.add_argument( @@ -1485,17 +1493,18 @@ def main(): default=None, help='Desired output format for the results.', ) - parser.add_argument( - '--color_warnings', - default=False, - type=str2bool, - nargs='?', - const=False, - help=( - 'Enable colored warnings in the console ' - 'output (True/False). Defaults to False.' - ), - ) + # TODO(JVM): fix color warnings + # parser.add_argument( + # '--color_warnings', + # default=False, + # type=str2bool, + # nargs='?', + # const=False, + # help=( + # 'Enable colored warnings in the console ' + # 'output (True/False). Defaults to False.' + # ), + # ) parser.add_argument( '--ground_failure', default=False, @@ -1514,17 +1523,11 @@ def main(): ) parser.add_argument('--resource_dir', default=None) - if not args: - print(f'Welcome. 
This is pelicun version {pelicun.__version__}') - print( - 'To access the documentation visit ' - 'https://nheri-simcenter.github.io/pelicun/index.html' - ) - print() + if not args_list: parser.print_help() return - args = parser.parse_args(args) + args = parser.parse_args(args_list) log_msg('Initializing pelicun calculation.') @@ -1533,12 +1536,11 @@ def main(): demand_file=args.demandFile, output_path=args.dirnameOutput, realizations=args.Realizations, - detailed_results=args.detailed_results, - coupled_EDP=args.coupled_EDP, auto_script_path=args.auto_script, custom_model_dir=args.custom_model_dir, - color_warnings=args.color_warnings, output_format=args.output_format, + detailed_results=args.detailed_results, + coupled_edp=args.coupled_EDP, ) log_msg('pelicun calculation completed.') diff --git a/pelicun/tools/HDF_to_CSV.py b/pelicun/tools/HDF_to_CSV.py index 21f8d1941..3cd27bddc 100644 --- a/pelicun/tools/HDF_to_CSV.py +++ b/pelicun/tools/HDF_to_CSV.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,28 +32,26 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . 
-# -# Contributors: -# Adam Zsarnóczay from __future__ import annotations -import pandas as pd -import sys + import argparse +import sys from pathlib import Path +import pandas as pd -def convert_HDF(HDF_path): - HDF_ext = HDF_path.split('.')[-1] - CSV_base = HDF_path[: -len(HDF_ext) - 1] - HDF_path = Path(HDF_path).resolve() +def convert_HDF(hdf_path) -> None: # noqa: N802 + hdf_ext = hdf_path.split('.')[-1] + csv_base = hdf_path[: -len(hdf_ext) - 1] - store = pd.HDFStore(HDF_path) + hdf_path = Path(hdf_path).resolve() - for key in store.keys(): + store = pd.HDFStore(hdf_path) - store[key].to_csv(f'{CSV_base}_{key[1:].replace("/", "_")}.csv') + for key in store: + store[key].to_csv(f'{csv_base}_{key[1:].replace("/", "_")}.csv') store.close() diff --git a/pelicun/uq.py b/pelicun/uq.py index 245e90928..fdeff8c49 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -33,34 +32,28 @@ # # You should have received a copy of the BSD 3-Clause License along with # pelicun. If not, see . -# -# Contributors: -# Adam Zsarnóczay -# John Vouvakis Manousakis - -""" -This module defines constants, classes and methods for uncertainty -quantification in pelicun. 
-""" +"""Constants, classes and methods for uncertainty quantification.""" from __future__ import annotations -from typing import TYPE_CHECKING -from collections.abc import Callable + from abc import ABC, abstractmethod -from scipy.stats import uniform, norm # type: ignore -from scipy.stats import multivariate_normal as mvn # type: ignore -from scipy.stats import weibull_min -from scipy.stats._mvn import mvndst # type: ignore # pylint: disable=no-name-in-module # noqa # lol -from scipy.linalg import cholesky, svd # type: ignore -from scipy.optimize import minimize # type: ignore +from typing import TYPE_CHECKING + +import colorama import numpy as np import pandas as pd -import colorama -from colorama import Fore -from colorama import Style +from scipy.linalg import cholesky, svd # type: ignore +from scipy.optimize import minimize # type: ignore +from scipy.stats import multivariate_normal as mvn # type: ignore +from scipy.stats import norm, uniform, weibull_min # type: ignore +from scipy.stats._mvn import ( + mvndst, # type: ignore # noqa: PLC2701 +) if TYPE_CHECKING: + from collections.abc import Callable + from pelicun.base import Logger colorama.init() @@ -113,9 +106,9 @@ def scale_distribution( If the specified distribution family is unsupported. 
""" - if truncation_limits is not None: - truncation_limits = truncation_limits * scale_factor + truncation_limits = truncation_limits.copy() + truncation_limits *= scale_factor # undefined family is considered deterministic if pd.isna(family): @@ -138,14 +131,12 @@ def scale_distribution( theta_new[0] = theta[0] * scale_factor theta_new[1] = theta[1] * scale_factor - elif family == 'deterministic': - theta_new[0] = theta[0] * scale_factor - - elif family == 'multilinear_CDF': + elif family in {'deterministic', 'multilinear_CDF'}: theta_new[0] = theta[0] * scale_factor else: - raise ValueError(f'Unsupported distribution: {family}') + msg = f'Unsupported distribution: {family}' + raise ValueError(msg) return theta_new, truncation_limits @@ -192,7 +183,6 @@ def mvn_orthotope_density( Estimate of the error in the calculated probability density. """ - # process the inputs and get the number of dimensions mu = np.atleast_1d(mu) cov = np.atleast_2d(cov) @@ -233,10 +223,7 @@ def mvn_orthotope_density( np.putmask(infin, lowinf * uppinf, -1) # prepare the correlation coefficients - if ndim == 1: - correl = np.array([0.00]) - else: - correl = corr[np.tril_indices(ndim, -1)] + correl = np.array([0.0]) if ndim == 1 else corr[np.tril_indices(ndim, -1)] # estimate the density eps_alpha, alpha, _ = mvndst(lower, upper, infin, correl) @@ -248,7 +235,7 @@ def _get_theta( params: np.ndarray, inits: np.ndarray, dist_list: np.ndarray ) -> np.ndarray: """ - Returns the parameters of the target distributions. + Return the parameters of the target distributions. Uses the parameter values from the optimization algorithm (that are relative to the initial values) and the initial values to @@ -274,13 +261,10 @@ def _get_theta( If any of the distributions is unsupported. 
""" - theta = np.zeros(inits.shape) for i, (params_i, inits_i, dist_i) in enumerate(zip(params, inits, dist_list)): - if dist_i in {'normal', 'normal_std', 'lognormal'}: - # Standard deviation is used directly for 'normal' and # 'lognormal' sig = ( @@ -296,7 +280,6 @@ def _get_theta( theta[i, 1] = sig elif dist_i == 'normal_cov': - # Note that the CoV is used for 'normal_cov' sig = np.exp(np.log(inits_i[1]) + params_i[1]) @@ -307,7 +290,8 @@ def _get_theta( theta[i, 1] = sig else: - raise ValueError(f'Unsupported distribution: {dist_i}') + msg = f'Unsupported distribution: {dist_i}' + raise ValueError(msg) return theta @@ -338,24 +322,18 @@ def _get_limit_probs( If any of the distributions is unsupported. """ - if distribution in {'normal', 'normal_std', 'normal_cov', 'lognormal'}: a, b = limits mu = theta[0] sig = theta[1] if distribution != 'normal_COV' else np.abs(mu) * theta[1] - if np.isnan(a): - p_a = 0.0 - else: - p_a = norm.cdf((a - mu) / sig) + p_a = 0.0 if np.isnan(a) else norm.cdf((a - mu) / sig) - if np.isnan(b): - p_b = 1.0 - else: - p_b = norm.cdf((b - mu) / sig) + p_b = 1.0 if np.isnan(b) else norm.cdf((b - mu) / sig) else: - raise ValueError(f'Unsupported distribution: {distribution}') + msg = f'Unsupported distribution: {distribution}' + raise ValueError(msg) return p_a, p_b @@ -395,7 +373,6 @@ def _get_std_samples( If any of the distributions is unsupported. """ - std_samples = np.zeros(samples.shape) for i, (samples_i, theta_i, tr_lim_i, dist_i) in enumerate( @@ -409,10 +386,11 @@ def _get_std_samples( True in (samples_i > lim_high).tolist() or True in (samples_i < lim_low).tolist() ): - raise ValueError( + msg = ( 'One or more sample values lie outside ' 'of the specified truncation limits.' 
) + raise ValueError(msg) # first transform from normal to uniform uni_samples = norm.cdf(samples_i, loc=theta_i[0], scale=theta_i[1]) @@ -429,13 +407,16 @@ def _get_std_samples( std_samples[i] = norm.ppf(uni_samples, loc=0.0, scale=1.0) else: - raise ValueError(f'Unsupported distribution: {dist_i}') + msg = f'Unsupported distribution: {dist_i}' + raise ValueError(msg) return std_samples def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: """ + Estimate the correlation matrix. + Estimate the correlation matrix of the given standard normal samples. Ensure that the correlation matrix is positive semidefinite. @@ -457,9 +438,9 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: If any of the elements of std_samples is np.inf or np.nan """ - if True in np.isinf(std_samples) or True in np.isnan(std_samples): - raise ValueError('std_samples array must not contain inf or NaN values') + msg = 'std_samples array must not contain inf or NaN values' + raise ValueError(msg) n_dims, n_samples = std_samples.shape @@ -482,7 +463,7 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: # otherwise, we can try to fix the matrix using SVD except np.linalg.LinAlgError: try: - U, s, _ = svd( + u_matrix, s_matrix, _ = svd( rho_hat, ) @@ -490,13 +471,15 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: # if this also fails, we give up return None - S = np.diagflat(s) + s_diag = np.diagflat(s_matrix) - rho_hat = U @ S @ U.T + rho_hat = u_matrix @ s_diag @ u_matrix.T np.fill_diagonal(rho_hat, 1.0) # check if we introduced any unreasonable values - if (np.max(rho_hat) > 1.01) or (np.min(rho_hat) < -1.01): + vmax = 1.01 + vmin = -1.01 + if (np.max(rho_hat) > vmax) or (np.min(rho_hat) < vmin): return None # round values to 1.0 and -1.0, if needed @@ -511,7 +494,7 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: def _mvn_scale(x: np.ndarray, rho: np.ndarray) -> np.ndarray: """ - 
Scaling utility function + Scaling utility function. Parameters ---------- @@ -533,14 +516,15 @@ def _mvn_scale(x: np.ndarray, rho: np.ndarray) -> np.ndarray: rho_0 = np.eye(n_dims, n_dims) a = mvn.pdf(x, mean=np.zeros(n_dims), cov=rho_0) - a[a < 1.0e-10] = 1.0e-10 + small_num = 1.0e-10 + a[a < small_num] = small_num b = mvn.pdf(x, mean=np.zeros(n_dims), cov=rho) return b / a -def _neg_log_likelihood( +def _neg_log_likelihood( # noqa: C901 params: np.ndarray, inits: np.ndarray, bnd_lower: np.ndarray, @@ -550,9 +534,11 @@ def _neg_log_likelihood( tr_limits: np.ndarray, det_limits: list[np.ndarray], censored_count: int, - enforce_bounds: bool = False, + enforce_bounds: bool = False, # noqa: FBT001, FBT002 ) -> float: """ + Calculate negative log likelihood. + Calculate the negative log likelihood of the given data samples given the parameter values and distribution information. @@ -562,18 +548,18 @@ def _neg_log_likelihood( Parameters ---------- - params : ndarray + params: ndarray 1D array with the parameter values to be assessed. - inits : ndarray + inits: ndarray 1D array with the initial estimates for the distribution parameters. - bnd_lower : ndarray + bnd_lower: ndarray 1D array with the lower bounds for the distribution parameters. - bnd_upper : ndarray + bnd_upper: ndarray 1D array with the upper bounds for the distribution parameters. - samples : ndarray + samples: ndarray 2D array with the data samples. Each column corresponds to a different random variable. dist_list: str ndarray of length D @@ -581,11 +567,11 @@ def _neg_log_likelihood( tr_limits: float ndarray Dx2 2D array with rows that represent [a, b] pairs of truncation limits. - det_limits : list + det_limits: list List with the detection limits for each random variable. - censored_count : int + censored_count: int Number of censored samples in the data. 
- enforce_bounds : bool, optional + enforce_bounds: bool, optional If True, the parameters are only considered valid if they are within the bounds defined by bnd_lower and bnd_upper. The default value is False. @@ -594,10 +580,11 @@ def _neg_log_likelihood( ------- float The negative log likelihood of the data given the distribution parameters. - """ + """ # First, check if the parameters are within the pre-defined bounds - # TODO: check if it is more efficient to use a bounded minimization algo + # TODO(AZ): check if it is more efficient to use a bounded + # minimization algo if enforce_bounds: if not ((params > bnd_lower) & (params < bnd_upper)).all(0): # if they are not, then return a large value to discourage the @@ -657,8 +644,8 @@ def _neg_log_likelihood( p_l, p_u = _get_limit_probs(det_lim_i, dist_i, theta_i) # rescale detection limits to consider truncation - p_l, p_u = [np.min([np.max([lim, p_a]), p_b]) for lim in (p_l, p_u)] - p_l, p_u = [(lim - p_a) / (p_b - p_a) for lim in (p_l, p_u)] + p_l, p_u = (np.min([np.max([lim, p_a]), p_b]) for lim in (p_l, p_u)) + p_l, p_u = ((lim - p_a) / (p_b - p_a) for lim in (p_l, p_u)) # transform limits to standard normal space det_lower[i], det_upper[i] = norm.ppf([p_l, p_u], loc=0.0, scale=1.0) @@ -686,8 +673,8 @@ def _neg_log_likelihood( # take the product of likelihoods calculated in each dimension scale = _mvn_scale(std_samples.T, rho_hat) - # TODO: We can almost surely replace the product of likelihoods with a call - # to mvn() + # TODO(AZ): We can almost surely replace the product of likelihoods + # with a call to mvn() likelihoods = np.prod(likelihoods, axis=0) * scale # Zeros are a result of limited floating point precision. 
Replace them @@ -696,25 +683,24 @@ def _neg_log_likelihood( likelihoods = np.clip(likelihoods, a_min=np.nextafter(0, 1), a_max=None) # calculate the total negative log likelihood - NLL = -( + null = -( np.sum(np.log(likelihoods)) # from samples + censored_count * np.log(cen_likelihood) ) # censoring influence # normalize the NLL with the sample count - NLL = NLL / samples.size + return null / samples.size # print(theta[0], params, NLL) - return NLL - -def fit_distribution_to_sample( +def fit_distribution_to_sample( # noqa: C901 raw_samples: np.ndarray, distribution: str | list[str], truncation_limits: tuple[float, float] = (np.nan, np.nan), censored_count: int = 0, detection_limits: tuple[float, float] = (np.nan, np.nan), + *, multi_fit: bool = False, logger_object: Logger | None = None, ) -> tuple[np.ndarray, np.ndarray]: @@ -789,7 +775,6 @@ def fit_distribution_to_sample( If NaN values are produced during standard normal space transformation """ - samples = np.atleast_2d(raw_samples) tr_limits = np.atleast_2d(truncation_limits) det_limits = np.atleast_2d(detection_limits) @@ -856,10 +841,10 @@ def fit_distribution_to_sample( # There is nothing to gain from a time-consuming optimization if.. # the number of samples is too small - if (n_samples < 3) or ( + small_n_samples = 3 + if (n_samples < small_n_samples) or ( # there are no truncation or detection limits involved - np.all(np.isnan(tr_limits)) - and np.all(np.isnan(det_limits)) + np.all(np.isnan(tr_limits)) and np.all(np.isnan(det_limits)) ): # In this case, it is typically hard to improve on the method of # moments estimates for the parameters of the marginal distributions @@ -958,13 +943,14 @@ def fit_distribution_to_sample( # samples using that type of correlation (i.e., Gaussian copula) std_samples = _get_std_samples(samples, theta, tr_limits, dist_list) if True in np.isnan(std_samples) or True in np.isinf(std_samples): - raise ValueError( + msg = ( 'Something went wrong.' 
'\n' 'Conversion to standard normal space was unsuccessful. \n' 'The given samples might deviate ' 'substantially from the specified distribution.' ) + raise ValueError(msg) rho_hat = _get_std_corr_matrix(std_samples) if rho_hat is None: # If there is not enough data to produce a valid correlation matrix @@ -973,16 +959,16 @@ def fit_distribution_to_sample( np.fill_diagonal(rho_hat, 1.0) if logger_object: - logger_object.warn( - "Demand sample size too small to reliably estimate " - "the correlation matrix. Assuming uncorrelated demands." + logger_object.warning( + 'Demand sample size too small to reliably estimate ' + 'the correlation matrix. Assuming uncorrelated demands.' ) else: - print( - f"\n{Fore.RED}WARNING: Demand sample size " - f"too small to reliably estimate " - f"the correlation matrix. Assuming " - f"uncorrelated demands.{Style.RESET_ALL}" + print( # noqa: T201 + '\nWARNING: Demand sample size ' + 'too small to reliably estimate ' + 'the correlation matrix. Assuming ' + 'uncorrelated demands.' ) for d_i, distr in enumerate(dist_list): @@ -996,15 +982,16 @@ def fit_distribution_to_sample( elif distr in {'normal', 'normal_cov'}: # replace standard deviation with coefficient of variation # note: this results in cov=inf if the mean is zero. - if np.abs(theta[d_i][0]) < 1.0e-40: + almost_zero = 1.0e-40 + if np.abs(theta[d_i][0]) < almost_zero: theta[d_i][1] = np.inf else: - theta[d_i][1] = theta[d_i][1] / np.abs(theta[d_i][0]) + theta[d_i][1] /= np.abs(theta[d_i][0]) return theta, rho_hat -def _OLS_percentiles( +def _OLS_percentiles( # noqa: N802 params: tuple[float, float], values: np.ndarray, perc: np.ndarray, family: str ) -> float: """ @@ -1012,13 +999,13 @@ def _OLS_percentiles( Parameters ---------- - params : tuple of floats + params: tuple of floats The parameters of the selected distribution family. - values : float ndarray + values: float ndarray The sample values for which the percentiles are requested. 
- perc : float ndarray + perc: float ndarray The requested percentile(s). - family : str + family: str The distribution family to use for the percentile estimation. Can be either 'normal' or 'lognormal'. @@ -1033,7 +1020,6 @@ def _OLS_percentiles( If `family` is not 'normal' or 'lognormal'. """ - if family == 'normal': theta_0 = params[0] theta_1 = params[1] @@ -1056,7 +1042,8 @@ def _OLS_percentiles( val_hat = np.exp(norm.ppf(perc, loc=np.log(theta_0), scale=theta_1)) else: - raise ValueError(f"Distribution family not recognized: {family}") + msg = f'Distribution family not recognized: {family}' + raise ValueError(msg) return np.sum((val_hat - values) ** 2.0) @@ -1088,7 +1075,6 @@ def fit_distribution_to_percentiles( Parameters of the fitted distribution. """ - out_list = [] percentiles_np = np.array(percentiles) @@ -1103,18 +1089,14 @@ def fit_distribution_to_percentiles( if family == 'normal': inits.append( - ( - np.abs(values[extreme_id] - inits[0]) - / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1)) - ) + np.abs(values[extreme_id] - inits[0]) + / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1)) ) elif family == 'lognormal': inits.append( - ( - np.abs(np.log(values[extreme_id] / inits[0])) - / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1)) - ) + np.abs(np.log(values[extreme_id] / inits[0])) + / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1)) ) out_list.append( @@ -1131,21 +1113,18 @@ def fit_distribution_to_percentiles( return families[best_out_id], out_list[best_out_id].x -class BaseRandomVariable(ABC): - """ - Base abstract class for different types of random variables. 
- - """ +class BaseRandomVariable(ABC): # noqa: B024 + """Base abstract class for different types of random variables.""" __slots__: list[str] = [ - 'name', - 'distribution', - 'f_map', - '_uni_samples', 'RV_set', - '_sample_DF', '_sample', + '_sample_DF', 'anchor', + 'distribution', + 'f_map', + 'name', + 'uni_samples', ] def __init__( @@ -1153,9 +1132,9 @@ def __init__( name: str, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: """ - Initializes a RandomVariable object. + Instantiate a RandomVariable object. Parameters ---------- @@ -1170,18 +1149,11 @@ def __init__( the attributes of this variable and its anchor do not have to be identical. - Raises - ------ - ValueError - If there are issues with the specified distribution theta - parameters. - """ - self.name = name self.distribution: str | None = None self.f_map = f_map - self._uni_samples: np.ndarray | None = None + self.uni_samples: np.ndarray | None = None self.RV_set: RandomVariableSet | None = None self._sample_DF: pd.Series | None = None self._sample: np.ndarray | None = None @@ -1220,7 +1192,7 @@ def sample(self, value: np.ndarray) -> None: self._sample_DF = pd.Series(value) @property - def sample_DF(self) -> pd.Series | None: + def sample_DF(self) -> pd.Series | None: # noqa: N802 """ Return the empirical or generated sample in a pandas Series. @@ -1247,12 +1219,12 @@ def uni_sample(self) -> np.ndarray | None: The sample from the controlling uniform distribution. """ - return self.anchor._uni_samples + return self.anchor.uni_samples @uni_sample.setter def uni_sample(self, value: np.ndarray) -> None: """ - Assign the controlling sample to the random variable + Assign the controlling sample to the random variable. Parameters ---------- @@ -1260,13 +1232,11 @@ def uni_sample(self, value: np.ndarray) -> None: An array of floating point values in the [0, 1] domain. 
""" - self._uni_samples = value + self.uni_samples = value class RandomVariable(BaseRandomVariable): - """ - Random variable that needs `values` in `inverse_transform` - """ + """Random variable that needs `values` in `inverse_transform`.""" __slots__: list[str] = [] @@ -1278,9 +1248,9 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: """ - Instantiates a normal random variable. + Instantiate a normal random variable. Parameters ---------- @@ -1313,32 +1283,33 @@ def __init__( ) @abstractmethod - def inverse_transform(self, values): + def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ - Uses inverse probability integral transformation on the + Evaluate the inverse CDF. + + Usses inverse probability integral transformation on the provided values. """ def inverse_transform_sampling(self) -> None: """ - Creates a sample using inverse probability integral - transformation. + Create a sample with inverse transform sampling. Raises ------ ValueError If there is no available uniform sample. + """ if self.uni_sample is None: - raise ValueError('No available uniform sample.') + msg = 'No available uniform sample.' + raise ValueError(msg) self.sample = self.inverse_transform(self.uni_sample) class UtilityRandomVariable(BaseRandomVariable): - """ - Random variable that needs `sample_size` in `inverse_transform` - """ + """Random variable that needs `sample_size` in `inverse_transform`.""" __slots__: list[str] = [] @@ -1348,9 +1319,9 @@ def __init__( name: str, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: """ - Instantiates a normal random variable. + Instantiate a normal random variable. 
Parameters ---------- @@ -1373,26 +1344,22 @@ def __init__( ) @abstractmethod - def inverse_transform(self, sample_size): + def inverse_transform(self, sample_size: int) -> np.ndarray: """ - Uses inverse probability integral transformation on the + Evaluate the inverse CDF. + + Usses inverse probability integral transformation on the provided values. """ def inverse_transform_sampling(self, sample_size: int) -> None: - """ - Creates a sample using inverse probability integral - transformation. - """ + """Create a sample with inverse transform sampling.""" self.sample = self.inverse_transform(sample_size) class NormalRandomVariable(RandomVariable): - """ - Normal random variable. - - """ + """Normal random variable.""" __slots__: list[str] = ['theta', 'truncation_limits'] @@ -1403,7 +1370,8 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """Instantiate a Normal random variable.""" if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -1419,8 +1387,7 @@ def __init__( def cdf(self, values: np.ndarray) -> np.ndarray: """ - Returns the Cumulative Density Function (CDF) at the specified - values. + Return the CDF at the given values. Parameters ---------- @@ -1443,7 +1410,7 @@ def cdf(self, values: np.ndarray) -> np.ndarray: if np.isnan(b): b = np.inf - p_a, p_b = [norm.cdf((lim - mu) / sig) for lim in (a, b)] + p_a, p_b = (norm.cdf((lim - mu) / sig) for lim in (a, b)) # cap the values at the truncation limits values = np.minimum(np.maximum(values, a), b) @@ -1459,8 +1426,10 @@ def cdf(self, values: np.ndarray) -> np.ndarray: return result - def inverse_transform(self, values): + def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ + Evaluate the inverse CDF. + Evaluates the inverse of the Cumulative Density Function (CDF) for the given values. Used to generate random variable realizations. 
@@ -1482,7 +1451,6 @@ def inverse_transform(self, values): too small """ - mu, sig = self.theta[:2] if np.any(~np.isnan(self.truncation_limits)): @@ -1493,16 +1461,17 @@ def inverse_transform(self, values): if np.isnan(b): b = np.inf - p_a, p_b = [norm.cdf((lim - mu) / sig) for lim in (a, b)] + p_a, p_b = (norm.cdf((lim - mu) / sig) for lim in (a, b)) if p_b - p_a == 0: - raise ValueError( - "The probability mass within the truncation limits is " - "too small and the truncated distribution cannot be " - "sampled with sufficiently high accuracy. This is most " - "probably due to incorrect truncation limits set for " - "the distribution." + msg = ( + 'The probability mass within the truncation limits is ' + 'too small and the truncated distribution cannot be ' + 'sampled with sufficiently high accuracy. This is most ' + 'probably due to incorrect truncation limits set for ' + 'the distribution.' ) + raise ValueError(msg) result = norm.ppf(values * (p_b - p_a) + p_a, loc=mu, scale=sig) @@ -1530,7 +1499,8 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """Instantiate a Normal_STD random variable.""" mean, std = theta[:2] theta = np.array([mean, std]) super().__init__(name, theta, truncation_limits, f_map, anchor) @@ -1554,11 +1524,22 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """ + Instantiate a Normal_COV random variable. + + Raises + ------ + ValueError + If the specified mean is zero. + + """ mean, cov = theta[:2] - if np.abs(mean) < 1e-40: - raise ValueError('The mean of Normal_COV RVs cannot be zero.') + almost_zero = 1e-40 + if np.abs(mean) < almost_zero: + msg = 'The mean of Normal_COV RVs cannot be zero.' 
+ raise ValueError(msg) std = mean * cov theta = np.array([mean, std]) @@ -1566,10 +1547,7 @@ def __init__( class LogNormalRandomVariable(RandomVariable): - """ - Lognormal random variable. - - """ + """Lognormal random variable.""" __slots__: list[str] = ['theta', 'truncation_limits'] @@ -1577,10 +1555,11 @@ def __init__( self, name: str, theta: np.ndarray, - truncation_limits=None, - f_map=None, - anchor=None, - ): + truncation_limits: np.ndarray | None = None, + f_map: Callable | None = None, + anchor: BaseRandomVariable | None = None, + ) -> None: + """Instantiate a LogNormal random variable.""" if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -1596,8 +1575,7 @@ def __init__( def cdf(self, values: np.ndarray) -> np.ndarray: """ - Returns the Cumulative Density Function (CDF) at the specified - values. + Return the CDF at the given values. Parameters ---------- @@ -1607,7 +1585,7 @@ def cdf(self, values: np.ndarray) -> np.ndarray: Returns ------- ndarray - CDF values + 1D float ndarray containing CDF values """ theta, beta = self.theta[:2] @@ -1620,9 +1598,9 @@ def cdf(self, values: np.ndarray) -> np.ndarray: if np.isnan(b): b = np.inf - p_a, p_b = [ + p_a, p_b = ( norm.cdf((np.log(lim) - np.log(theta)) / beta) for lim in (a, b) - ] + ) # cap the values at the truncation limits values = np.minimum(np.maximum(values, a), b) @@ -1642,9 +1620,10 @@ def cdf(self, values: np.ndarray) -> np.ndarray: def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ - Evaluates the inverse of the Cumulative Density Function (CDF) - for the given values. Used to generate random variable - realizations. + Evaluate the inverse CDF. + + Usses inverse probability integral transformation on the + provided values. 
Parameters ---------- @@ -1657,7 +1636,6 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: Inverse CDF values """ - theta, beta = self.theta[:2] if np.any(~np.isnan(self.truncation_limits)): @@ -1671,9 +1649,9 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: if np.isnan(b): b = np.inf - p_a, p_b = [ + p_a, p_b = ( norm.cdf((np.log(lim) - np.log(theta)) / beta) for lim in (a, b) - ] + ) result = np.exp( norm.ppf(values * (p_b - p_a) + p_a, loc=np.log(theta), scale=beta) @@ -1686,10 +1664,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: class UniformRandomVariable(RandomVariable): - """ - Uniform random variable. - - """ + """Uniform random variable.""" __slots__: list[str] = ['theta', 'truncation_limits'] @@ -1700,7 +1675,8 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """Instantiate a Uniform random variable.""" if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -1716,8 +1692,7 @@ def __init__( def cdf(self, values: np.ndarray) -> np.ndarray: """ - Returns the Cumulative Density Function (CDF) at the specified - values. + Return the CDF at the given values. Parameters ---------- @@ -1727,7 +1702,7 @@ def cdf(self, values: np.ndarray) -> np.ndarray: Returns ------- ndarray - CDF values + 1D float ndarray containing CDF values """ a, b = self.theta[:2] @@ -1740,15 +1715,14 @@ def cdf(self, values: np.ndarray) -> np.ndarray: if np.any(~np.isnan(self.truncation_limits)): a, b = self.truncation_limits - result = uniform.cdf(values, loc=a, scale=(b - a)) - - return result + return uniform.cdf(values, loc=a, scale=(b - a)) def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ - Evaluates the inverse of the Cumulative Density Function (CDF) - for the given values. Used to generate random variable - realizations. + Evaluate the inverse CDF. 
+ + Usses inverse probability integral transformation on the + provided values. Parameters ---------- @@ -1771,16 +1745,11 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: if np.any(~np.isnan(self.truncation_limits)): a, b = self.truncation_limits - result = uniform.ppf(values, loc=a, scale=(b - a)) - - return result + return uniform.ppf(values, loc=a, scale=(b - a)) class WeibullRandomVariable(RandomVariable): - """ - Weibull random variable. - - """ + """Weibull random variable.""" __slots__: list[str] = ['theta', 'truncation_limits'] @@ -1791,7 +1760,8 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """Instantiate a Weibull random variable.""" if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -1807,18 +1777,17 @@ def __init__( def cdf(self, values: np.ndarray) -> np.ndarray: """ - Returns the Cumulative Density Function (CDF) at the specified - values. + Return the CDF at the given values. Parameters ---------- values: 1D float ndarray - Values for which to evaluate the CDF. + Values for which to evaluate the CDF Returns ------- ndarray - CDF values. + 1D float ndarray containing CDF values """ lambda_, kappa = self.theta[:2] @@ -1832,7 +1801,7 @@ def cdf(self, values: np.ndarray) -> np.ndarray: if np.isnan(b): b = np.inf - p_a, p_b = [weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b)] + p_a, p_b = (weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b)) # cap the values at the truncation limits values = np.minimum(np.maximum(values, a), b) @@ -1854,22 +1823,22 @@ def cdf(self, values: np.ndarray) -> np.ndarray: def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ - Evaluates the inverse of the Cumulative Density Function (CDF) - for the given values. Used to generate random variable - realizations. + Evaluate the inverse CDF. 
+ + Usses inverse probability integral transformation on the + provided values. Parameters ---------- values: 1D float ndarray - Values for which to evaluate the inverse CDF. + Values for which to evaluate the inverse CDF Returns ------- ndarray - Inverse CDF values. + Inverse CDF values """ - lambda_, kappa = self.theta[:2] if np.any(~np.isnan(self.truncation_limits)): @@ -1883,7 +1852,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: if np.isnan(b): b = np.inf - p_a, p_b = [weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b)] + p_a, p_b = (weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b)) result = weibull_min.ppf( values * (p_b - p_a) + p_a, kappa, scale=lambda_ @@ -1897,9 +1866,11 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: class MultilinearCDFRandomVariable(RandomVariable): """ - Multilinear CDF random variable. This RV is defined by specifying - the points that define its Cumulative Density Function (CDF), and - linear interpolation between them. + Multilinear CDF random variable. + + This RV is defined by specifying the points that define its + Cumulative Density Function (CDF), and linear interpolation + between them. """ @@ -1912,7 +1883,18 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """ + Instantiate a MultilinearCDF random variable. + + Raises + ------ + ValueError + In case of incompatible input parameters. + NotImplementedError + If truncation limits are specified. 
+ + """ if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -1925,52 +1907,52 @@ def __init__( self.distribution = 'multilinear_CDF' if not np.all(np.isnan(truncation_limits)): - raise NotImplementedError( - f'{self.distribution} RVs do not support truncation' - ) + msg = f'{self.distribution} RVs do not support truncation' + raise NotImplementedError(msg) y_1 = theta[0, 1] if y_1 != 0.00: - raise ValueError( - "For multilinear CDF random variables, y_1 should be set to 0.00" - ) + msg = 'For multilinear CDF random variables, y_1 should be set to 0.00' + raise ValueError(msg) y_n = theta[-1, 1] if y_n != 1.00: - raise ValueError( - "For multilinear CDF random variables, y_n should be set to 1.00" - ) + msg = 'For multilinear CDF random variables, y_n should be set to 1.00' + raise ValueError(msg) x_s = theta[:, 0] if not np.array_equal(np.sort(x_s), x_s): - raise ValueError( - "For multilinear CDF random variables, " - "Xs should be specified in ascending order" + msg = ( + 'For multilinear CDF random variables, ' + 'Xs should be specified in ascending order' ) + raise ValueError(msg) if np.any(np.isclose(np.diff(x_s), 0.00)): - raise ValueError( - "For multilinear CDF random variables, " - "Xs should be specified in strictly ascending order" + msg = ( + 'For multilinear CDF random variables, ' + 'Xs should be specified in strictly ascending order' ) + raise ValueError(msg) y_s = theta[:, 1] if not np.array_equal(np.sort(y_s), y_s): - raise ValueError( - "For multilinear CDF random variables, " - "Ys should be specified in ascending order" + msg = ( + 'For multilinear CDF random variables, ' + 'Ys should be specified in ascending order' ) + raise ValueError(msg) if np.any(np.isclose(np.diff(y_s), 0.00)): - raise ValueError( - "For multilinear CDF random variables, " - "Ys should be specified in strictly ascending order" + msg = ( + 'For multilinear CDF random variables, ' + 'Ys should be specified in strictly ascending 
order' ) + raise ValueError(msg) self.theta = np.atleast_1d(theta) def cdf(self, values: np.ndarray) -> np.ndarray: """ - Returns the Cumulative Density Function (CDF) at the specified - values. + Return the CDF at the given values. Parameters ---------- @@ -1980,22 +1962,21 @@ def cdf(self, values: np.ndarray) -> np.ndarray: Returns ------- ndarray - CDF values + 1D float ndarray containing CDF values """ x_i = [-np.inf] + [x[0] for x in self.theta] + [np.inf] y_i = [0.00] + [x[1] for x in self.theta] + [1.00] # Using Numpy's interp for linear interpolation - result = np.interp(values, x_i, y_i, left=0.00, right=1.00) - - return result + return np.interp(values, x_i, y_i, left=0.00, right=1.00) def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ - Evaluates the inverse of the Cumulative Density Function (CDF) - for the given values. Used to generate random variable - realizations. + Evaluate the inverse CDF. + + Usses inverse probability integral transformation on the + provided values. Parameters ---------- @@ -2008,7 +1989,6 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: Inverse CDF values """ - x_i = [x[0] for x in self.theta] y_i = [x[1] for x in self.theta] @@ -2019,46 +1999,43 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: # extrapolate). # note: swapping the roles of x_i and y_i for inverse # interpolation - result = np.interp(values, y_i, x_i) - - return result + return np.interp(values, y_i, x_i) class EmpiricalRandomVariable(RandomVariable): - """ - Empirical random variable. 
- - """ + """Empirical random variable.""" __slots__: list[str] = ['_raw_samples'] def __init__( self, name: str, - raw_samples: np.ndarray, + theta: np.ndarray, truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """Instantiate an Empirical random variable.""" if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( name=name, - theta=raw_samples, + theta=theta, truncation_limits=truncation_limits, f_map=f_map, anchor=anchor, ) self.distribution = 'empirical' if not np.all(np.isnan(truncation_limits)): - raise NotImplementedError( - f'{self.distribution} RVs do not support truncation' - ) + msg = f'{self.distribution} RVs do not support truncation' + raise NotImplementedError(msg) - self._raw_samples = np.atleast_1d(raw_samples) + self._raw_samples = np.atleast_1d(theta) def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ + Evaluate the inverse CDF. + Maps given values to their corresponding positions within the empirical data array, simulating an inverse transformation based on the empirical distribution. This can be seen as a @@ -2079,34 +2056,30 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ s_ids = (values * len(self._raw_samples)).astype(int) - result = self._raw_samples[s_ids] - return result + return self._raw_samples[s_ids] class CoupledEmpiricalRandomVariable(UtilityRandomVariable): - """ - Coupled empirical random variable. - - """ + """Coupled empirical random variable.""" __slots__: list[str] = ['_raw_samples'] def __init__( self, name: str, - raw_samples: np.ndarray, + theta: np.ndarray, truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: """ - Instantiates a coupled empirical random variable. + Instantiate a coupled empirical random variable. 
Parameters ---------- name: string A unique string that identifies the random variable. - raw_samples: 1D float ndarray + theta: 1D float ndarray Samples from which to draw empirical realizations. truncation_limits: 2D float ndarray Not supported for CoupledEmpirical RVs. @@ -2135,14 +2108,15 @@ def __init__( ) self.distribution = 'coupled_empirical' if not np.all(np.isnan(truncation_limits)): - raise NotImplementedError( - f'{self.distribution} RVs do not support truncation' - ) + msg = f'{self.distribution} RVs do not support truncation' + raise NotImplementedError(msg) - self._raw_samples = np.atleast_1d(raw_samples) + self._raw_samples = np.atleast_1d(theta) def inverse_transform(self, sample_size: int) -> np.ndarray: """ + Evaluate the inverse CDF. + Generates a new sample array from the existing empirical data by repeating the dataset until it matches the requested sample size. @@ -2162,20 +2136,15 @@ def inverse_transform(self, sample_size: int) -> np.ndarray: dataset. """ - raw_sample_count = len(self._raw_samples) new_sample = np.tile( self._raw_samples, int(sample_size / raw_sample_count) + 1 ) - result = new_sample[:sample_size] - return result + return new_sample[:sample_size] class DeterministicRandomVariable(UtilityRandomVariable): - """ - Deterministic random variable. - - """ + """Deterministic random variable.""" __slots__: list[str] = ['theta'] @@ -2186,11 +2155,12 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: """ - Instantiates a deterministic random variable. This behaves - like a RandomVariable object but represents a specific, - deterministic value. + Instantiate a deterministic random variable. + + This behaves like a RandomVariable object but represents a + specific, deterministic value. 
Parameters ---------- @@ -2225,15 +2195,14 @@ def __init__( ) self.distribution = 'deterministic' if not np.all(np.isnan(truncation_limits)): - raise NotImplementedError( - f'{self.distribution} RVs do not support truncation' - ) + msg = f'{self.distribution} RVs do not support truncation' + raise NotImplementedError(msg) self.theta = np.atleast_1d(theta) def inverse_transform(self, sample_size: int) -> np.ndarray: """ - Generates samples that correspond to the value. + Evaluate the inverse CDF. Parameters ---------- @@ -2246,16 +2215,11 @@ def inverse_transform(self, sample_size: int) -> np.ndarray: Sample array containing the deterministic value. """ - - result = np.full(sample_size, self.theta[0]) - return result + return np.full(sample_size, self.theta[0]) class MultinomialRandomVariable(RandomVariable): - """ - Multinomial random variable. - - """ + """Multinomial random variable.""" __slots__: list[str] = ['theta'] @@ -2266,7 +2230,18 @@ def __init__( truncation_limits: np.ndarray | None = None, f_map: Callable | None = None, anchor: BaseRandomVariable | None = None, - ): + ) -> None: + """ + Instantiate a Multinomial random variable. + + Raises + ------ + ValueError + In case of incompatible input parameters. + NotImplementedError + If truncation limits are specified. + + """ if truncation_limits is None: truncation_limits = np.array((np.nan, np.nan)) super().__init__( @@ -2277,22 +2252,24 @@ def __init__( anchor=anchor, ) if not np.all(np.isnan(truncation_limits)): - raise NotImplementedError( - f'{self.distribution} RVs do not support truncation' - ) + msg = f'{self.distribution} RVs do not support truncation' + raise NotImplementedError(msg) self.distribution = 'multinomial' if np.sum(theta) > 1.00: - raise ValueError( - f"The set of p values provided for a multinomial " - f"distribution shall sum up to less than or equal to 1.0. " - f"The provided values sum up to {np.sum(theta)}. p = " - f"{theta} ." 
+ msg = ( + f'The set of p values provided for a multinomial ' + f'distribution shall sum up to less than or equal to 1.0. ' + f'The provided values sum up to {np.sum(theta)}. p = ' + f'{theta} .' ) + raise ValueError(msg) self.theta = np.atleast_1d(theta) def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ + Evaluate the inverse CDF. + Transforms continuous values into discrete events based on the cumulative probabilities of the multinomial distribution derived by `theta`. @@ -2316,13 +2293,13 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: values[values < p_i] = 10 + i values[values <= 1.0] = 10 + len(p_cum) - result = values - 10 - - return result + return values - 10 class RandomVariableSet: """ + Random variable set. + Represents a set of random variables, each of which is described by its own probability distribution. The set allows the user to define correlations between the random variables, and provides @@ -2340,32 +2317,36 @@ class RandomVariableSet: Defines the correlation matrix that describes the correlation between the random variables in the set. Currently, only the Gaussian copula is supported. 
+ """ - __slots__: list[str] = ['name', '_variables', '_Rho'] + __slots__: list[str] = ['_Rho', '_variables', 'name'] - def __init__(self, name: str, RV_list: list[RandomVariable], Rho: np.ndarray): + def __init__( + self, name: str, rv_list: list[BaseRandomVariable], rho: np.ndarray + ) -> None: + """Instantiate a random variable set.""" self.name = name - if len(RV_list) > 1: + if len(rv_list) > 1: # put the RVs in a dictionary for more efficient access - reorder = np.argsort([RV.name for RV in RV_list]) - self._variables = {RV_list[i].name: RV_list[i] for i in reorder} + reorder = np.argsort([RV.name for RV in rv_list]) + self._variables = {rv_list[i].name: rv_list[i] for i in reorder} # reorder the entries in the correlation matrix to correspond to the # sorted list of RVs - self._Rho = np.asarray(Rho[(reorder)].T[(reorder)].T) + self._Rho = np.asarray(rho[(reorder)].T[(reorder)].T) else: # if there is only one variable (for testing, probably) - self._variables = {rv.name: rv for rv in RV_list} - self._Rho = np.asarray(Rho) + self._variables = {rv.name: rv for rv in rv_list} + self._Rho = np.asarray(rho) # assign this RV_set to the variables - for _, var in self._variables.items(): + for var in self._variables.values(): var.RV_set = self @property - def RV(self) -> dict[str, RandomVariable]: + def RV(self) -> dict[str, RandomVariable]: # noqa: N802 """ Returns the random variable(s) assigned to the set. @@ -2403,9 +2384,9 @@ def sample(self) -> dict[str, np.ndarray | None]: """ return {name: rv.sample for name, rv in self._variables.items()} - def Rho(self, var_subset: list[str] | None = None) -> np.ndarray: + def Rho(self, var_subset: list[str] | None = None) -> np.ndarray: # noqa: N802 """ - Returns the (subset of the) correlation matrix. + Return the (subset of the) correlation matrix. Returns ------- @@ -2428,35 +2409,34 @@ def apply_correlation(self) -> None: correlations while preserving as much as possible from the correlation matrix. 
""" - - U_RV = np.array([RV.uni_sample for RV_name, RV in self.RV.items()]) + u_rv = np.array([RV.uni_sample for RV_name, RV in self.RV.items()]) # First try doing the Cholesky transformation try: - N_RV = norm.ppf(U_RV) + n_rv = norm.ppf(u_rv) - L = cholesky(self._Rho, lower=True) + l_mat = cholesky(self._Rho, lower=True) - NC_RV = L @ N_RV + nc_rv = l_mat @ n_rv - UC_RV = norm.cdf(NC_RV) + uc_rv = norm.cdf(nc_rv) except np.linalg.LinAlgError: # if the Cholesky doesn't work, we need to use the more # time-consuming but more robust approach based on SVD - N_RV = norm.ppf(U_RV) + n_rv = norm.ppf(u_rv) - U, s, _ = svd( + u_mat, s_mat, _ = svd( self._Rho, ) - S = np.diagflat(np.sqrt(s)) + s_diag = np.diagflat(np.sqrt(s_mat)) - NC_RV = (N_RV.T @ S @ U.T).T + nc_rv = (n_rv.T @ s_diag @ u_mat.T).T - UC_RV = norm.cdf(NC_RV) + uc_rv = norm.cdf(nc_rv) - for RV, uc_RV in zip(self.RV.values(), UC_RV): - RV.uni_sample = uc_RV + for rv, ucrv in zip(self.RV.values(), uc_rv): + rv.uni_sample = ucrv def orthotope_density( self, @@ -2499,7 +2479,6 @@ def orthotope_density( Estimate of the error in alpha. """ - if isinstance(lower, float): lower = np.array([lower]) if isinstance(upper, float): @@ -2535,7 +2514,7 @@ def orthotope_density( lower_std = lower_std.T upper_std = upper_std.T - OD = [ + od = [ mvn_orthotope_density( mu=np.zeros(len(variables)), cov=self.Rho(var_subset), @@ -2545,32 +2524,31 @@ def orthotope_density( for l_i, u_i in zip(lower_std, upper_std) ] - return np.asarray(OD) + return np.asarray(od) class RandomVariableRegistry: - """ - Description + """Random variable registry.""" - Parameters - ---------- - - """ + __slots__: list[str] = ['_rng', '_sets', '_variables'] - __slots__: list[str] = ['_rng', '_variables', '_sets'] - - def __init__(self, rng: np.random.Generator): + def __init__(self, rng: np.random.Generator) -> None: """ + Instantiate a random variable registry. 
+ + Parameters + ---------- rng: numpy.random._generator.Generator Random variable generator object. - e.g.: np.random.default_rng(seed) + e.g.: np.random.default_rng(seed). + """ self._rng = rng - self._variables: dict[str, RandomVariable] = {} + self._variables: dict[str, BaseRandomVariable] = {} self._sets: dict[str, RandomVariableSet] = {} @property - def RV(self) -> dict[str, RandomVariable]: + def RV(self) -> dict[str, BaseRandomVariable]: # noqa: N802 """ Returns all random variable(s) in the registry. @@ -2582,9 +2560,9 @@ def RV(self) -> dict[str, RandomVariable]: """ return self._variables - def RVs(self, keys: list[str]) -> dict[str, RandomVariable]: + def RVs(self, keys: list[str]) -> dict[str, BaseRandomVariable]: # noqa: N802 """ - Returns a subset of the random variables in the registry + Return a subset of the random variables in the registry. Parameters ---------- @@ -2599,7 +2577,7 @@ def RVs(self, keys: list[str]) -> dict[str, RandomVariable]: """ return {name: self._variables[name] for name in keys} - def add_RV(self, RV: RandomVariable) -> None: + def add_RV(self, rv: BaseRandomVariable) -> None: # noqa: N802 """ Add a new random variable to the registry. @@ -2609,12 +2587,13 @@ def add_RV(self, RV: RandomVariable) -> None: When the RV already exists in the registry """ - if RV.name in self._variables: - raise ValueError(f'RV {RV.name} already exists in the registry.') - self._variables.update({RV.name: RV}) + if rv.name in self._variables: + msg = f'RV {rv.name} already exists in the registry.' + raise ValueError(msg) + self._variables.update({rv.name: rv}) @property - def RV_set(self) -> dict[str, RandomVariableSet]: + def RV_set(self) -> dict[str, RandomVariableSet]: # noqa: N802 """ Return the random variable set(s) in the registry. 
@@ -2626,14 +2605,12 @@ def RV_set(self) -> dict[str, RandomVariableSet]: """ return self._sets - def add_RV_set(self, RV_set: RandomVariableSet) -> None: - """ - Add a new set of random variables to the registry - """ - self._sets.update({RV_set.name: RV_set}) + def add_RV_set(self, rv_set: RandomVariableSet) -> None: # noqa: N802 + """Add a new set of random variables to the registry.""" + self._sets.update({rv_set.name: rv_set}) @property - def RV_sample(self) -> dict[str, np.ndarray | None]: + def RV_sample(self) -> dict[str, np.ndarray | None]: # noqa: N802 """ Return the sample for every random variable in the registry. @@ -2647,11 +2624,10 @@ def RV_sample(self) -> dict[str, np.ndarray | None]: def generate_sample(self, sample_size: int, method: str) -> None: """ - Generates samples for all variables in the registry. + Generate samples for all variables in the registry. Parameters ---------- - sample_size: int The number of samples requested per variable. method: str @@ -2668,10 +2644,9 @@ def generate_sample(self, sample_size: int, method: str) -> None: When the RV parent class is Unknown """ - # Generate a dictionary with IDs of the free (non-anchored and # non-deterministic) variables - RV_list = [ + rv_list = [ RV_name for RV_name, RV in self.RV.items() if ( @@ -2679,49 +2654,49 @@ def generate_sample(self, sample_size: int, method: str) -> None: or (RV.distribution in {'deterministic', 'coupled_empirical'}) ) ] - RV_ID = {RV_name: ID for ID, RV_name in enumerate(RV_list)} - RV_count = len(RV_ID) + rv_id = {RV_name: ID for ID, RV_name in enumerate(rv_list)} + rv_count = len(rv_id) # Generate controlling samples from a uniform distribution for free RVs if 'LHS' in method: bin_low = np.array( - [self._rng.permutation(sample_size) for i in range(RV_count)] + [self._rng.permutation(sample_size) for i in range(rv_count)] ) if method == 'LHS_midpoint': - U_RV = np.ones([RV_count, sample_size]) * 0.5 - U_RV = (bin_low + U_RV) / sample_size + u_rv = 
np.ones([rv_count, sample_size]) * 0.5 + u_rv = (bin_low + u_rv) / sample_size elif method == 'LHS': - U_RV = self._rng.random(size=[RV_count, sample_size]) - U_RV = (bin_low + U_RV) / sample_size + u_rv = self._rng.random(size=[rv_count, sample_size]) + u_rv = (bin_low + u_rv) / sample_size elif method == 'MonteCarlo': - U_RV = self._rng.random(size=[RV_count, sample_size]) + u_rv = self._rng.random(size=[rv_count, sample_size]) # Assign the controlling samples to the RVs - for RV_name, RV_id in RV_ID.items(): - self.RV[RV_name].uni_sample = U_RV[RV_id] + for rv_name, rvid in rv_id.items(): + self.RV[rv_name].uni_sample = u_rv[rvid] # Apply correlations for the pre-defined RV sets - for RV_set in self.RV_set.values(): + for rv_set in self.RV_set.values(): # prepare the correlated uniform distribution for the set - RV_set.apply_correlation() + rv_set.apply_correlation() # Convert from uniform to the target distribution for every RV - for RV in self.RV.values(): - if isinstance(RV, UtilityRandomVariable): - RV.inverse_transform_sampling(sample_size) - elif isinstance(RV, RandomVariable): - RV.inverse_transform_sampling() + for rv in self.RV.values(): + if isinstance(rv, UtilityRandomVariable): + rv.inverse_transform_sampling(sample_size) + elif isinstance(rv, RandomVariable): + rv.inverse_transform_sampling() else: - raise NotImplementedError('Unknown RV parent class.') + msg = 'Unknown RV parent class.' + raise NotImplementedError(msg) def rv_class_map(distribution_name: str) -> type[BaseRandomVariable]: """ - Maps convenient distribution names to their corresponding random - variable class. + Map convenient distributions to their corresponding class. 
Parameters ---------- @@ -2756,5 +2731,6 @@ def rv_class_map(distribution_name: str) -> type[BaseRandomVariable]: 'multinomial': MultinomialRandomVariable, } if distribution_name not in distribution_map: - raise ValueError(f'Unsupported distribution: {distribution_name}') + msg = f'Unsupported distribution: {distribution_name}' + raise ValueError(msg) return distribution_map[distribution_name] diff --git a/pytest.ini b/pytest.ini index 7640f4f7f..2762a03a5 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,6 +1,7 @@ [pytest] filterwarnings = ignore:.*errors='ignore' is deprecated and will raise.*:FutureWarning + ignore:.*Downcasting object dtype arrays on.*:FutureWarning ignore:.*invalid value encountered in multiply.*:RuntimeWarning ignore:.*invalid value encountered in add.*:RuntimeWarning ignore:.*DataFrameGroupBy\.apply operated on the grouping columns.*:DeprecationWarning diff --git a/run_checks.sh b/run_checks.sh index a8b02ae24..d4a20e227 100755 --- a/run_checks.sh +++ b/run_checks.sh @@ -2,6 +2,7 @@ # Spell-check echo "Spell-checking." +echo codespell . if [ $? -ne 0 ]; then echo "Spell-checking failed." @@ -9,7 +10,8 @@ if [ $? -ne 0 ]; then fi # Run ruff for linting -echo "Linting with `ruff check`." +echo "Linting with 'ruff check --fix'." +echo ruff check --fix --output-format concise if [ $? -ne 0 ]; then echo "ruff failed." @@ -18,6 +20,7 @@ fi # Run mypy for type checking echo "Type checking with mypy." +echo mypy pelicun if [ $? -ne 0 ]; then echo "mypy failed. Exiting." @@ -26,6 +29,7 @@ fi # Run pytest for testing and generate coverage report echo "Running unit-tests." +echo python -m pytest pelicun/tests --cov=pelicun --cov-report html -n auto if [ $? -ne 0 ]; then echo "pytest failed. Exiting." @@ -33,3 +37,4 @@ if [ $? -ne 0 ]; then fi echo "All checks passed successfully." +echo diff --git a/setup.py b/setup.py index e1487f50d..69cca4f78 100644 --- a/setup.py +++ b/setup.py @@ -1,28 +1,63 @@ -""" -setup.py file of the `pelicun` package. 
+# +# Copyright (c) 2023 Leland Stanford Junior University +# Copyright (c) 2023 The Regents of the University of California +# +# This file is part of pelicun. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# You should have received a copy of the BSD 3-Clause License along with +# pelicun. If not, see . 
-""" +"""setup.py file of the `pelicun` package.""" + +from pathlib import Path + +from setuptools import find_packages, setup -import io -from setuptools import setup, find_packages import pelicun -def read(*filenames, **kwargs): - """ - Utility function to read multiple files into a string - """ +def read(*filenames, **kwargs) -> None: # noqa: ANN002, ANN003 + """Read multiple files into a string.""" encoding = kwargs.get('encoding', 'utf-8') sep = kwargs.get('sep', '\n') buf = [] for filename in filenames: - with io.open(filename, encoding=encoding) as f: + with Path(filename).open(encoding=encoding) as f: buf.append(f.read()) return sep.join(buf) long_description = read('README.md') +# TODO(JVM): update documentation requirements, remove those no longer +# used. + setup( name='pelicun', version=pelicun.__version__, @@ -62,20 +97,26 @@ def read(*filenames, **kwargs): 'pydocstyle', 'mypy', 'black', + 'ruff', 'pytest', 'pytest-cov', 'pytest-xdist', 'glob2', 'jupyter', 'jupytext', - 'sphinx', 'sphinx-autoapi', - 'nbsphinx', 'flake8-rst', 'flake8-rst-docstrings', 'pandas-stubs', 'types-colorama', 'codespell', + 'sphinx', + 'sphinx_design', + 'sphinx-rtd-theme', + 'nbsphinx', + 'numpydoc', + 'rendre>0.0.14', + 'jsonpath2', ], }, classifiers=[ From 8be695771f27f2292df865278d7a82df11e5b980 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Fri, 18 Oct 2024 17:25:45 -0700 Subject: [PATCH 06/27] Additional GitHub workflows - Format checking and linting with Ruff. - Spell checking with codespell. 
--- .github/workflows/format_check.yml | 11 +++++++++++ .github/workflows/lint.yml | 10 ++++++++++ .github/workflows/spell_check.yml | 15 +++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 .github/workflows/format_check.yml create mode 100644 .github/workflows/lint.yml create mode 100644 .github/workflows/spell_check.yml diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml new file mode 100644 index 000000000..f779ab252 --- /dev/null +++ b/.github/workflows/format_check.yml @@ -0,0 +1,11 @@ +name: Ruff format +on: [push, pull_request] +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: chartboost/ruff-action@v1 + with: + args: 'format --check' + version: 0.6.1 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 000000000..be42eccfe --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,10 @@ +name: Ruff check +on: [push, pull_request] +jobs: + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: chartboost/ruff-action@v1 + with: + version: 0.6.1 diff --git a/.github/workflows/spell_check.yml b/.github/workflows/spell_check.yml new file mode 100644 index 000000000..c894573a1 --- /dev/null +++ b/.github/workflows/spell_check.yml @@ -0,0 +1,15 @@ +name: Spell Check + +on: [push, pull_request] + +jobs: + spell-check: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Run codespell + uses: codespell-project/actions-codespell@v2 + From 75e9860d28bdac0946483e885012405bfc1999ec Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Fri, 18 Oct 2024 17:30:24 -0700 Subject: [PATCH 07/27] Update ruff version and requirements. 
--- .github/workflows/format_check.yml | 2 +- .github/workflows/lint.yml | 2 +- setup.py | 34 +++++++++++++----------------- 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml index f779ab252..294f1e458 100644 --- a/.github/workflows/format_check.yml +++ b/.github/workflows/format_check.yml @@ -8,4 +8,4 @@ jobs: - uses: chartboost/ruff-action@v1 with: args: 'format --check' - version: 0.6.1 + version: 0.7.0 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index be42eccfe..2546c0edb 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -7,4 +7,4 @@ jobs: - uses: actions/checkout@v4 - uses: chartboost/ruff-action@v1 with: - version: 0.6.1 + version: 0.7.0 diff --git a/setup.py b/setup.py index 69cca4f78..076c249d0 100644 --- a/setup.py +++ b/setup.py @@ -87,36 +87,32 @@ def read(*filenames, **kwargs) -> None: # noqa: ANN002, ANN003 ], extras_require={ 'development': [ - 'ruff', + 'codespell', 'flake8', 'flake8-bugbear', 'flake8-rst', 'flake8-rst-docstrings', + 'glob2', + 'jsonpath2', + 'jupyter', + 'jupytext', + 'mypy', + 'nbsphinx', + 'numpydoc', + 'pandas-stubs', + 'pydocstyle', 'pylint', 'pylint-pytest', - 'pydocstyle', - 'mypy', - 'black', - 'ruff', 'pytest', 'pytest-cov', 'pytest-xdist', - 'glob2', - 'jupyter', - 'jupytext', - 'sphinx-autoapi', - 'flake8-rst', - 'flake8-rst-docstrings', - 'pandas-stubs', - 'types-colorama', - 'codespell', + 'rendre>0.0.14', + 'ruff==0.7.0', 'sphinx', - 'sphinx_design', + 'sphinx-autoapi', 'sphinx-rtd-theme', - 'nbsphinx', - 'numpydoc', - 'rendre>0.0.14', - 'jsonpath2', + 'sphinx_design', + 'types-colorama', ], }, classifiers=[ From 541e0665d7b5514426bd4faad484e988d8ce0d8c Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 23 Oct 2024 12:43:41 -0700 Subject: [PATCH 08/27] Rename variable --- pelicun/uq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pelicun/uq.py 
b/pelicun/uq.py index fdeff8c49..a451d4665 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -463,7 +463,7 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: # otherwise, we can try to fix the matrix using SVD except np.linalg.LinAlgError: try: - u_matrix, s_matrix, _ = svd( + u_matrix, s_vector, _ = svd( rho_hat, ) @@ -471,7 +471,7 @@ def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None: # if this also fails, we give up return None - s_diag = np.diagflat(s_matrix) + s_diag = np.diagflat(s_vector) rho_hat = u_matrix @ s_diag @ u_matrix.T np.fill_diagonal(rho_hat, 1.0) From 07dc45fc2c21a5791231e6aa041293c94668d025 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 23 Oct 2024 12:45:21 -0700 Subject: [PATCH 09/27] Rename variable. --- pelicun/uq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pelicun/uq.py b/pelicun/uq.py index a451d4665..0858c625b 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -683,13 +683,13 @@ def _neg_log_likelihood( # noqa: C901 likelihoods = np.clip(likelihoods, a_min=np.nextafter(0, 1), a_max=None) # calculate the total negative log likelihood - null = -( + negative_log_likelihood = -( np.sum(np.log(likelihoods)) # from samples + censored_count * np.log(cen_likelihood) ) # censoring influence # normalize the NLL with the sample count - return null / samples.size + return negative_log_likelihood / samples.size # print(theta[0], params, NLL) From 69a867a2b0266f06bea06820edd75eb46c978c51 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 23 Oct 2024 12:47:17 -0700 Subject: [PATCH 10/27] Reorder lines. 
--- pelicun/uq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pelicun/uq.py b/pelicun/uq.py index 0858c625b..dd595c15b 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -688,11 +688,11 @@ def _neg_log_likelihood( # noqa: C901 + censored_count * np.log(cen_likelihood) ) # censoring influence + # print(theta[0], params, NLL) + # normalize the NLL with the sample count return negative_log_likelihood / samples.size - # print(theta[0], params, NLL) - def fit_distribution_to_sample( # noqa: C901 raw_samples: np.ndarray, From 04e64a9c8571dedfdde9e1289803c737d5bb9fe2 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Wed, 23 Oct 2024 12:58:19 -0700 Subject: [PATCH 11/27] Rename attribute. - Rename `uni_samples` to `_uni_sample`. - Modify `uni_sample` property to avoid accessing private attribute of `self.anchor`. --- pelicun/uq.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pelicun/uq.py b/pelicun/uq.py index dd595c15b..fae3a747f 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -393,18 +393,18 @@ def _get_std_samples( raise ValueError(msg) # first transform from normal to uniform - uni_samples = norm.cdf(samples_i, loc=theta_i[0], scale=theta_i[1]) + uni_sample = norm.cdf(samples_i, loc=theta_i[0], scale=theta_i[1]) # replace 0 and 1 values with the nearest float - uni_samples[uni_samples == 0] = np.nextafter(0, 1) - uni_samples[uni_samples == 1] = np.nextafter(1, -1) + uni_sample[uni_sample == 0] = np.nextafter(0, 1) + uni_sample[uni_sample == 1] = np.nextafter(1, -1) # consider truncation if needed p_a, p_b = _get_limit_probs(tr_lim_i, dist_i, theta_i) - uni_samples = (uni_samples - p_a) / (p_b - p_a) + uni_sample = (uni_sample - p_a) / (p_b - p_a) # then transform from uniform to standard normal - std_samples[i] = norm.ppf(uni_samples, loc=0.0, scale=1.0) + std_samples[i] = norm.ppf(uni_sample, loc=0.0, scale=1.0) else: msg = f'Unsupported distribution: {dist_i}' @@ -1120,11 +1120,11 @@ 
class BaseRandomVariable(ABC): # noqa: B024 'RV_set', '_sample', '_sample_DF', + '_uni_sample', 'anchor', 'distribution', 'f_map', 'name', - 'uni_samples', ] def __init__( @@ -1153,7 +1153,7 @@ def __init__( self.name = name self.distribution: str | None = None self.f_map = f_map - self.uni_samples: np.ndarray | None = None + self._uni_sample: np.ndarray | None = None self.RV_set: RandomVariableSet | None = None self._sample_DF: pd.Series | None = None self._sample: np.ndarray | None = None @@ -1219,7 +1219,9 @@ def uni_sample(self) -> np.ndarray | None: The sample from the controlling uniform distribution. """ - return self.anchor.uni_samples + if self.anchor is self: + return self._uni_sample + return self.anchor.uni_sample @uni_sample.setter def uni_sample(self, value: np.ndarray) -> None: @@ -1232,7 +1234,7 @@ def uni_sample(self, value: np.ndarray) -> None: An array of floating point values in the [0, 1] domain. """ - self.uni_samples = value + self._uni_sample = value class RandomVariable(BaseRandomVariable): From ba9b0c2303085a559e23018006244afc27dfe443 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 05:01:51 -0700 Subject: [PATCH 12/27] Revert changes. Use multi-line if/else statements instead of single-line. 
--- pelicun/resources/auto/Hazus_Earthquake_IM.py | 71 +++++++++++++------ 1 file changed, 51 insertions(+), 20 deletions(-) diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py index c674bb8c4..725946ed0 100644 --- a/pelicun/resources/auto/Hazus_Earthquake_IM.py +++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py @@ -125,32 +125,54 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 bridge_class = 'HWB28' if len_max_span > 150: - bridge_class = 'HWB1' if not seismic else 'HWB2' + if not seismic: + bridge_class = 'HWB1' + else: + bridge_class = 'HWB2' elif num_span == 1: - bridge_class = 'HWB3' if not seismic else 'HWB4' + if not seismic: + bridge_class = 'HWB3' + else: + bridge_class = 'HWB4' - elif structure_type in list(range(101, 107)): - bridge_class = ('HWB5' if state != 6 else 'HWB6') if not seismic else 'HWB7' + elif structureType in list(range(101, 107)): + if not seismic: + if state != 6: + bridge_class = 'HWB5' + else: + bridge_class = 'HWB6' + else: + bridge_class = 'HWB7' - elif structure_type in [205, 206]: - bridge_class = 'HWB8' if not seismic else 'HWB9' + elif structureType in [205, 206]: + if not seismic: + bridge_class = 'HWB8' + else: + bridge_class = 'HWB9' - elif structure_type in list(range(201, 207)): - bridge_class = 'HWB10' if not seismic else 'HWB11' + elif structureType in list(range(201, 207)): + if not seismic: + bridge_class = 'HWB10' + else: + bridge_class = 'HWB11' - elif structure_type in list(range(301, 307)): + elif structureType in list(range(301, 307)): if not seismic: if len_max_span >= 20: - bridge_class = 'HWB12' if state != 6 else 'HWB13' - elif state != 6: - bridge_class = 'HWB24' + if state != 6: + bridge_class = 'HWB12' + else: + bridge_class = 'HWB13' else: - bridge_class = 'HWB25' + if state != 6: + bridge_class = 'HWB24' + else: + bridge_class = 'HWB25' else: bridge_class = 'HWB14' - elif structure_type in list(range(402, 411)): + elif structureType in 
list(range(402, 411)): if not seismic: if len_max_span >= 20: bridge_class = 'HWB15' @@ -161,17 +183,26 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB16' - elif structure_type in list(range(501, 507)): + elif structureType in list(range(501, 507)): if not seismic: - bridge_class = 'HWB17' if state != 6 else 'HWB18' + if state != 6: + bridge_class = 'HWB17' + else: + bridge_class = 'HWB18' else: bridge_class = 'HWB19' - elif structure_type in [605, 606]: - bridge_class = 'HWB20' if not seismic else 'HWB21' + elif structureType in [605, 606]: + if not seismic: + bridge_class = 'HWB20' + else: + bridge_class = 'HWB21' - elif structure_type in list(range(601, 608)): - bridge_class = 'HWB22' if not seismic else 'HWB23' + elif structureType in list(range(601, 608)): + if not seismic: + bridge_class = 'HWB22' + else: + bridge_class = 'HWB23' # TODO: review and add HWB24-27 rules # TODO: also double check rules for HWB10-11 and HWB22-23 From 662ea815d5fcf26928c2a72898bd91bfe42c5f0f Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 05:09:18 -0700 Subject: [PATCH 13/27] Manual formatting of DataFrame definitions. 
--- pelicun/resources/auto/Hazus_Earthquake_IM.py | 92 +++++++++---------- 1 file changed, 42 insertions(+), 50 deletions(-) diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py index 725946ed0..d2265cbcf 100644 --- a/pelicun/resources/auto/Hazus_Earthquake_IM.py +++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py @@ -351,10 +351,12 @@ def auto_populate(aim): # noqa: C901 else: lf = f'LF.{bt}.{dl}' - comp = pd.DataFrame( - {f'{lf}': ['ea', 1, 1, 1, 'N/A']}, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp = pd.DataFrame( # noqa + {f'{lf}': ['ea', 1, 1, 1, 'N/A']}, # noqa + index = ['Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on # if needed, add components to simulate damage from ground failure if ground_failure: @@ -363,15 +365,15 @@ def auto_populate(aim): # noqa: C901 fg_gf_h = f'GF.H.{foundation_type}' fg_gf_v = f'GF.V.{foundation_type}' - CMP_GF = pd.DataFrame( - { - f'{fg_gf_h}': ['ea', 1, 1, 1, 'N/A'], - f'{fg_gf_v}': ['ea', 1, 3, 1, 'N/A'], - }, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp_gf = pd.DataFrame( # noqa + {f'{fg_gf_h}':[ 'ea', 1, 1, 1, 'N/A'], # noqa + f'{fg_gf_v}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on - comp = pd.concat([comp, CMP_GF], axis=0) + comp = pd.concat([comp, comp_gf], axis=0) # set the number of stories to 1 # there is only one component in a building-level resolution @@ -412,13 +414,13 @@ def auto_populate(aim): # noqa: C901 bt = convertBridgeToHAZUSclass(gi) gi_ap['BridgeHazusClass'] = bt - comp = pd.DataFrame( - { - f'HWB.GS.{bt[3:]}': ['ea', 1, 1, 1, 'N/A'], - 'HWB.GF': ['ea', 1, 1, 1, 'N/A'], - }, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp = pd.DataFrame( # noqa + {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa + 
f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on dl_ap = { 'Asset': { @@ -445,13 +447,13 @@ def auto_populate(aim): # noqa: C901 tt = convertTunnelToHAZUSclass(gi) gi_ap['TunnelHazusClass'] = tt - comp = pd.DataFrame( - { - f'HTU.GS.{tt[3:]}': ['ea', 1, 1, 1, 'N/A'], - 'HTU.GF': ['ea', 1, 1, 1, 'N/A'], - }, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp = pd.DataFrame( # noqa + {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa + f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on dl_ap = { 'Asset': { @@ -477,10 +479,12 @@ def auto_populate(aim): # noqa: C901 rt = convertRoadToHAZUSclass(gi) gi_ap['RoadHazusClass'] = rt - comp = pd.DataFrame( - {f'HRD.GF.{rt[3:]}': ['ea', 1, 1, 1, 'N/A']}, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp = pd.DataFrame( # noqa + {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']}, # noqa + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on dl_ap = { 'Asset': { @@ -602,26 +606,14 @@ def auto_populate(aim): # noqa: C901 location_string = f'1--{num_segments}' if num_segments > 1 else '1' # Define performance model - comp = pd.DataFrame( - { - f'PWP.{pipe_flexibility}.GS': [ - 'ea', - location_string, - '0', - 1, - 'N/A', - ], - f'PWP.{pipe_flexibility}.GF': [ - 'ea', - location_string, - '0', - 1, - 'N/A', - ], - 'aggregate': ['ea', location_string, '0', 1, 'N/A'], - }, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T + # fmt: off + comp = pd.DataFrame( # noqa + {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'], # noqa + f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'], # noqa + 'aggregate': ['ea', location_string, '0', 1, 'N/A']}, # noqa + index = 
['Units','Location','Direction','Theta_0','Family'] # noqa + ).T # noqa + # fmt: on # Set up the demand cloning configuration for the pipe # segments, if required. From b42cb3fd6d49df4dbf68d40754773ce50c0feb4c Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 05:43:01 -0700 Subject: [PATCH 14/27] Revert changes to ruleset files. --- .../rulesets/BldgClassRulesets.py | 183 ++----- .../rulesets/BuildingClassRulesets.py | 189 +++----- .../rulesets/FloodAssmRulesets.py | 117 ++--- .../rulesets/FloodClassRulesets.py | 237 ++++----- .../dl_calculation/rulesets/FloodRulesets.py | 236 ++++----- .../rulesets/MetaVarRulesets.py | 453 ++++++++---------- .../rulesets/WindCECBRulesets.py | 94 ++-- .../rulesets/WindCERBRulesets.py | 93 ++-- .../dl_calculation/rulesets/WindEFRulesets.py | 279 +++++------ .../rulesets/WindMECBRulesets.py | 105 ++-- .../rulesets/WindMERBRulesets.py | 107 +++-- .../dl_calculation/rulesets/WindMHRulesets.py | 65 ++- .../rulesets/WindMLRIRulesets.py | 92 ++-- .../rulesets/WindMLRMRulesets.py | 236 +++++---- .../rulesets/WindMMUHRulesets.py | 158 +++--- .../rulesets/WindMSFRulesets.py | 221 +++++---- .../rulesets/WindMetaVarRulesets.py | 413 ++++++++-------- .../rulesets/WindSECBRulesets.py | 102 ++-- .../rulesets/WindSERBRulesets.py | 101 ++-- .../rulesets/WindSPMBRulesets.py | 68 +-- .../rulesets/WindWMUHRulesets.py | 180 +++---- .../rulesets/WindWSFRulesets.py | 195 ++++---- pyproject.toml | 4 +- 23 files changed, 1849 insertions(+), 2079 deletions(-) diff --git a/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py index fd1f53bdf..60432ff41 100644 --- a/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,10 +43,13 
@@ # Meredith Lockhead # Tracy Kijewski-Correa +import random +import numpy as np +import datetime -def building_class(bim: dict, hazard: str) -> str: # noqa: C901 +def building_class(BIM, hazard): """ - Short description. + Short description Long description @@ -60,182 +64,97 @@ def building_class(bim: dict, hazard: str) -> str: # noqa: C901 ------- bldg_class: str One of the standard building class labels from HAZUS - """ + # check hazard - if hazard not in {'wind', 'inundation'}: - print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 + if hazard not in ['wind', 'inundation']: + print(f'WARNING: The provided hazard is not recognized: {hazard}') if hazard == 'wind': - if bim['BuildingType'] == 'Wood': - if (bim['OccupancyClass'] == 'RES1') or ( - (bim['RoofShape'] != 'flt') and (bim['OccupancyClass'] == '') # noqa: PLC1901 - ): + if BIM['BuildingType'] == "Wood": + if ((BIM['OccupancyClass'] == 'RES1') or + ((BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == ''))): # OccupancyClass = RES1 # Wood Single-Family Homes (WSF1 or WSF2) # OR roof type = flat (HAZUS can only map flat to WSF1) # OR default (by '') - if ( - bim['RoofShape'] == 'flt' - ): # checking if there is a misclassication - bim['RoofShape'] = ( - # ensure the WSF has gab (by default, note gab - # is more vulnerable than hip) - 'gab' - ) + if BIM['RoofShape'] == 'flt': # checking if there is a misclassication + BIM['RoofShape'] = 'gab' # ensure the WSF has gab (by default, note gab is more vulneable than hip) bldg_class = 'WSF' else: # OccupancyClass = RES3, RES5, RES6, or COM8 # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3) bldg_class = 'WMUH' - elif bim['BuildingType'] == 'Steel': - if (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ): + elif BIM['BuildingType'] == "Steel": + if ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 
'RES3F'])): # Steel Engineered Residential Building (SERBL, SERBM, SERBH) bldg_class = 'SERB' - elif (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in { - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ): + elif ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5', + 'COM6', 'COM7', 'COM8', 'COM9','COM10'])): # Steel Engineered Commercial Building (SECBL, SECBM, SECBH) bldg_class = 'SECB' - elif (bim['DesignLevel'] == 'PE') and ( - bim['OccupancyClass'] - not in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ): + elif ((BIM['DesignLevel'] == 'PE') and + (BIM['OccupancyClass'] not in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F'])): # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL) bldg_class = 'SPMB' else: bldg_class = 'SECB' - elif bim['BuildingType'] == 'Concrete': - if (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in { - 'RES3A', - 'RES3B', - 'RES3C', - 'RES3D', - 'RES3E', - 'RES3F', - 'RES5', - 'RES6', - } - ): + elif BIM['BuildingType'] == "Concrete": + if ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F', 'RES5', 'RES6'])): # Concrete Engineered Residential Building (CERBL, CERBM, CERBH) bldg_class = 'CERB' - elif (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in { - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ): + elif ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5', + 'COM6', 'COM7', 'COM8', 'COM9','COM10'])): # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH) bldg_class = 'CECB' else: bldg_class = 'CECB' - elif bim['BuildingType'] == 'Masonry': - if bim['OccupancyClass'] == 'RES1': + elif BIM['BuildingType'] == "Masonry": + if BIM['OccupancyClass'] == 'RES1': # OccupancyClass = RES1 # Masonry 
Single-Family Homes (MSF1 or MSF2) bldg_class = 'MSF' - elif ( - bim['OccupancyClass'] - in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ) and (bim['DesignLevel'] == 'E'): + elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F']) and (BIM['DesignLevel'] == 'E')): # Masonry Engineered Residential Building (MERBL, MERBM, MERBH) bldg_class = 'MERB' - elif ( - bim['OccupancyClass'] - in { - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ) and (bim['DesignLevel'] == 'E'): + elif ((BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', + 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', + 'COM10']) and (BIM['DesignLevel'] == 'E')): # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH) bldg_class = 'MECB' - elif bim['OccupancyClass'] in { - 'IND1', - 'IND2', - 'IND3', - 'IND4', - 'IND5', - 'IND6', - }: + elif BIM['OccupancyClass'] in ['IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6']: # Masonry Low-Rise Masonry Warehouse/Factory (MLRI) bldg_class = 'MLRI' - elif bim['OccupancyClass'] in { - 'RES3A', - 'RES3B', - 'RES3C', - 'RES3D', - 'RES3E', - 'RES3F', - 'RES5', - 'RES6', - 'COM8', - }: + elif BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F', 'RES5', 'RES6', 'COM8']: # OccupancyClass = RES3X or COM8 # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3) bldg_class = 'MMUH' - elif (bim['NumberOfStories'] == 1) and ( - bim['OccupancyClass'] in {'COM1', 'COM2'} - ): + elif ((BIM['NumberOfStories'] == 1) and + (BIM['OccupancyClass'] in ['COM1', 'COM2'])): # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2) bldg_class = 'MLRM' - # elif ( - # BIM['OccupancyClass'] - # in [ - # 'RES3A', - # 'RES3B', - # 'RES3C', - # 'RES3D', - # 'RES3E', - # 'RES3F', - # 'RES5', - # 'RES6', - # 'COM8', - # ] - # ) and (BIM['DesignLevel'] in ['NE', 'ME']): - # # Masonry Multi-Unit Hotel/Motel Non-Engineered - # # (MMUH1NE, MMUH2NE, or MMUH3NE) - # bldg_class = 
'MMUHNE' else: - bldg_class = 'MECB' # for others not covered by the above + bldg_class = 'MECB' # for others not covered by the above + #elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + # 'RES3E', 'RES3F', 'RES5', 'RES6', + # 'COM8']) and (BIM['DesignLevel'] in ['NE', 'ME'])): + # # Masonry Multi-Unit Hotel/Motel Non-Engineered + # # (MMUH1NE, MMUH2NE, or MMUH3NE) + # bldg_class = 'MMUHNE' - elif bim['BuildingType'] == 'Manufactured': + elif BIM['BuildingType'] == "Manufactured": bldg_class = 'MH' else: diff --git a/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py index e3a593a50..b646946f0 100644 --- a/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,10 +43,14 @@ # Meredith Lockhead # Tracy Kijewski-Correa +import random +import numpy as np +import datetime -def building_class(bim: dict, hazard: str) -> str: # noqa: C901 + +def building_class(BIM, hazard): """ - Short description. 
+ Short description Long description @@ -58,196 +63,112 @@ def building_class(bim: dict, hazard: str) -> str: # noqa: C901 ------- bldg_class: str One of the standard building class labels from HAZUS - """ + # check hazard - if hazard not in {'wind', 'inundation'}: - print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 + if hazard not in ['wind', 'inundation']: + print(f'WARNING: The provided hazard is not recognized: {hazard}') if hazard == 'wind': - if bim['BuildingType'] == 'Wood': - if (bim['OccupancyClass'] == 'RES1') or ( - (bim['RoofShape'] != 'flt') and (bim['OccupancyClass'] == '') # noqa: PLC1901 - ): + + if BIM['BuildingType'] == 'Wood': + if ((BIM['OccupancyClass'] == 'RES1') or + ((BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == ''))): # BuildingType = 3001 # OccupancyClass = RES1 # Wood Single-Family Homes (WSF1 or WSF2) # OR roof type = flat (HAZUS can only map flat to WSF1) # OR default (by '') - if ( - bim['RoofShape'] == 'flt' - ): # checking if there is a misclassication - bim['RoofShape'] = ( - # ensure the WSF has gab (by default, note gab - # is more vulnerable than hip) - 'gab' - ) + if BIM['RoofShape'] == 'flt': # checking if there is a misclassication + BIM['RoofShape'] = 'gab' # ensure the WSF has gab (by default, note gab is more vulneable than hip) bldg_class = 'WSF' else: # BuildingType = 3001 # OccupancyClass = RES3, RES5, RES6, or COM8 # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3) bldg_class = 'WMUH' - elif bim['BuildingType'] == 'Steel': - if (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ): + elif BIM['BuildingType'] == 'Steel': + if ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F'])): # BuildingType = 3002 # Steel Engineered Residential Building (SERBL, SERBM, SERBH) bldg_class = 'SERB' - elif (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in 
{ - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ): + elif ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5', + 'COM6', 'COM7', 'COM8', 'COM9','COM10'])): # BuildingType = 3002 # Steel Engineered Commercial Building (SECBL, SECBM, SECBH) bldg_class = 'SECB' - elif (bim['DesignLevel'] == 'PE') and ( - bim['OccupancyClass'] - not in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ): + elif ((BIM['DesignLevel'] == 'PE') and + (BIM['OccupancyClass'] not in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F'])): # BuildingType = 3002 # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL) bldg_class = 'SPMB' else: bldg_class = 'SECB' - elif bim['BuildingType'] == 'Concrete': - if (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in { - 'RES3A', - 'RES3B', - 'RES3C', - 'RES3D', - 'RES3E', - 'RES3F', - 'RES5', - 'RES6', - } - ): + elif BIM['BuildingType'] == 'Concrete': + if ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F', 'RES5', 'RES6'])): # BuildingType = 3003 # Concrete Engineered Residential Building (CERBL, CERBM, CERBH) bldg_class = 'CERB' - elif (bim['DesignLevel'] == 'E') and ( - bim['OccupancyClass'] - in { - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ): + elif ((BIM['DesignLevel'] == 'E') and + (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5', + 'COM6', 'COM7', 'COM8', 'COM9','COM10'])): # BuildingType = 3003 # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH) bldg_class = 'CECB' else: bldg_class = 'CECB' - elif bim['BuildingType'] == 'Masonry': - if bim['OccupancyClass'] == 'RES1': + elif BIM['BuildingType'] == 'Masonry': + if BIM['OccupancyClass'] == 'RES1': # BuildingType = 3004 # OccupancyClass = RES1 # Masonry Single-Family Homes (MSF1 or MSF2) bldg_class = 'MSF' 
- elif ( - bim['OccupancyClass'] - in {'RES3A', 'RES3B', 'RES3C', 'RES3D', 'RES3E', 'RES3F'} - ) and (bim['DesignLevel'] == 'E'): + elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F']) and (BIM['DesignLevel'] == 'E')): # BuildingType = 3004 # Masonry Engineered Residential Building (MERBL, MERBM, MERBH) bldg_class = 'MERB' - elif ( - bim['OccupancyClass'] - in { - 'COM1', - 'COM2', - 'COM3', - 'COM4', - 'COM5', - 'COM6', - 'COM7', - 'COM8', - 'COM9', - 'COM10', - } - ) and (bim['DesignLevel'] == 'E'): + elif ((BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', + 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', + 'COM10']) and (BIM['DesignLevel'] == 'E')): # BuildingType = 3004 # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH) bldg_class = 'MECB' - elif bim['OccupancyClass'] in { - 'IND1', - 'IND2', - 'IND3', - 'IND4', - 'IND5', - 'IND6', - }: + elif BIM['OccupancyClass'] in ['IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6']: # BuildingType = 3004 # Masonry Low-Rise Masonry Warehouse/Factory (MLRI) bldg_class = 'MLRI' - elif bim['OccupancyClass'] in { - 'RES3A', - 'RES3B', - 'RES3C', - 'RES3D', - 'RES3E', - 'RES3F', - 'RES5', - 'RES6', - 'COM8', - }: + elif BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + 'RES3E', 'RES3F', 'RES5', 'RES6', 'COM8']: # BuildingType = 3004 # OccupancyClass = RES3X or COM8 # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3) bldg_class = 'MMUH' - elif (bim['NumberOfStories'] == 1) and ( - bim['OccupancyClass'] in {'COM1', 'COM2'} - ): + elif ((BIM['NumberOfStories'] == 1) and + (BIM['OccupancyClass'] in ['COM1', 'COM2'])): # BuildingType = 3004 # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2) bldg_class = 'MLRM' - # elif ( - # BIM['OccupancyClass'] - # in [ - # 'RES3A', - # 'RES3B', - # 'RES3C', - # 'RES3D', - # 'RES3E', - # 'RES3F', - # 'RES5', - # 'RES6', - # 'COM8', - # ] - # ) and (BIM['DesignLevel'] in ['NE', 'ME']): - # # BuildingType = 3004 - # # Masonry Multi-Unit 
Hotel/Motel Non-Engineered - # # (MMUH1NE, MMUH2NE, or MMUH3NE) - # return 'MMUHNE' else: - bldg_class = 'MECB' # for others not covered by the above - elif bim['BuildingType'] == 'Manufactured': + bldg_class = 'MECB' # for others not covered by the above + #elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D', + # 'RES3E', 'RES3F', 'RES5', 'RES6', + # 'COM8']) and (BIM['DesignLevel'] in ['NE', 'ME'])): + # # BuildingType = 3004 + # # Masonry Multi-Unit Hotel/Motel Non-Engineered + # # (MMUH1NE, MMUH2NE, or MMUH3NE) + # return 'MMUHNE' + elif BIM['BuildingType'] == 'Manufactured': bldg_class = 'MH' else: bldg_class = 'WMUH' # if nan building type is provided, return the dominant class - return bldg_class + return bldg_class \ No newline at end of file diff --git a/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py index c06a6bdbe..658d2e4a3 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,13 +44,14 @@ # Meredith Lockhead # Tracy Kijewski-Correa +import random +import numpy as np +import datetime +import math -from __future__ import annotations - - -def Assm_config(bim: dict) -> tuple[str, str]: +def Assm_config(BIM): """ - Rules to identify the flood vulnerability category. + Rules to identify the flood vunerability category Parameters ---------- @@ -59,88 +61,45 @@ def Assm_config(bim: dict) -> tuple[str, str]: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + year = BIM['YearBuilt'] # just for the sake of brevity # Flood Type - if bim['FloodZone'] == 'AO': - flood_type = 'raz' # Riverline/A-Zone - elif bim['FloodZone'] in {'AE', 'AH', 'A'}: - flood_type = 'caz' # Costal/A-Zone - elif bim['FloodZone'] == 'VE': - flood_type = 'cvz' # Costal/V-Zone + if BIM['FloodZone'] in ['AO']: + flood_type = 'raz' # Riverline/A-Zone + elif BIM['FloodZone'] in ['AE', 'AH', 'A']: + flood_type = 'caz' # Costal/A-Zone + elif BIM['FloodZone'] in ['VE']: + flood_type = 'cvz' # Costal/V-Zone else: - flood_type = 'caz' # Default + flood_type = 'caz' # Default # PostFIRM - post_firm = False # Default - city_list = [ - 'Absecon', - 'Atlantic', - 'Brigantine', - 'Buena', - 'Buena Vista', - 'Corbin City', - 'Egg Harbor City', - 'Egg Harbor', - 'Estell Manor', - 'Folsom', - 'Galloway', - 'Hamilton', - 'Hammonton', - 'Linwood', - 'Longport', - 'Margate City', - 'Mullica', - 'Northfield', - 'Pleasantville', - 'Port Republic', - 'Somers Point', - 'Ventnor City', - 'Weymouth', - ] - year_list = [ - 1976, - 1971, - 1971, - 1983, - 1979, - 1981, - 1982, - 1983, - 1978, - 1982, - 1983, - 1977, - 1982, - 1983, - 1974, - 1974, - 1982, - 1979, - 1983, - 1983, - 1982, - 1971, - 1979, - ] - for i in range(22): - post_firm = ( - (bim['City'] == city_list[i]) and (year > year_list[i]) - ) or post_firm + PostFIRM = False # Default + city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista', + 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor', + 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood', + 'Longport', 'Margate City', 'Mullica', 'Northfield', + 'Pleasantville', 'Port Republic', 'Somers Point', + 'Ventnor City', 'Weymouth'] + year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982, + 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983, + 1982, 1971, 1979] + for i in range(0,22): + PostFIRM = (((BIM['City'] == city_list[i]) and (year > 
year_list[i])) or \ + PostFIRM) # fl_assm - fl_assm = ( - f"{'fl_surge_assm'}_" - f"{bim['OccupancyClass']}_" - f"{int(post_firm)}_" - f"{flood_type}" - ) + fl_assm = f"{'fl_surge_assm'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{int(PostFIRM)}_" \ + f"{flood_type}" # hu_assm - hu_assm = f"{'hu_surge_assm'}_{bim['OccupancyClass']}_{int(post_firm)}" + hu_assm = f"{'hu_surge_assm'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{int(PostFIRM)}" - return hu_assm, fl_assm + return hu_assm, fl_assm \ No newline at end of file diff --git a/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py index 954235e2a..702c829ec 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,10 +45,9 @@ import numpy as np - -def FL_config(bim: dict) -> str: # noqa: C901 +def FL_config(BIM): """ - Rules to identify the flood vulnerability category. + Rules to identify the flood vunerability category Parameters ---------- @@ -57,126 +57,143 @@ def FL_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + year = BIM['YearBuilt'] # just for the sake of brevity # Flood Type - if bim['FloodZone'] == 'AO': - flood_type = 'raz' # Riverline/A-Zone - elif bim['FloodZone'] in {'A', 'AE'} or bim['FloodZone'].startswith('V'): - flood_type = 'cvz' # Costal-Zone + if BIM['FloodZone'] == 'AO': + flood_type = 'raz' # Riverline/A-Zone + elif BIM['FloodZone'] in ['A', 'AE']: + flood_type = 'cvz' # Costal-Zone + elif BIM['FloodZone'].startswith('V'): + flood_type = 'cvz' # Costal-Zone else: - flood_type = 'cvz' # Default + flood_type = 'cvz' # Default - # flake8 - unused variable: `FFE` - # # First Floor Elevation (FFE) - # if flood_type in ['raz', 'caz']: - # FFE = BIM['FirstFloorElevation'] - # else: - # FFE = BIM['FirstFloorElevation'] - 1.0 + # First Floor Elevation (FFE) + if flood_type in ['raz', 'caz']: + FFE = BIM['FirstFloorElevation'] + else: + FFE = BIM['FirstFloorElevation'] - 1.0 # PostFIRM - post_firm = False # Default - city_list = [ - 'Absecon', - 'Atlantic', - 'Brigantine', - 'Buena', - 'Buena Vista', - 'Corbin City', - 'Egg Harbor City', - 'Egg Harbor', - 'Estell Manor', - 'Folsom', - 'Galloway', - 'Hamilton', - 'Hammonton', - 'Linwood', - 'Longport', - 'Margate City', - 'Mullica', - 'Northfield', - 'Pleasantville', - 'Port Republic', - 'Somers Point', - 'Ventnor City', - 'Weymouth', - ] - year_list = [ - 1976, - 1971, - 1971, - 1983, - 1979, - 1981, - 1982, - 1983, - 1978, - 1982, - 1983, - 1977, - 1982, - 1983, - 1974, - 1974, - 1982, - 1979, - 1983, - 1983, - 1982, - 1971, - 1979, - ] - for i in range(22): - post_firm = ( - (bim['City'] == city_list[i]) and (year > year_list[i]) - ) or post_firm + PostFIRM = False # Default + city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista', + 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor', + 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood', + 'Longport', 'Margate City', 'Mullica', 'Northfield', + 'Pleasantville', 'Port 
Republic', 'Somers Point', + 'Ventnor City', 'Weymouth'] + year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982, + 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983, + 1982, 1971, 1979] + for i in range(0,22): + PostFIRM = (((BIM['City'] == city_list[i]) and (year > year_list[i])) or \ + PostFIRM) # Basement Type - if bim['SplitLevel'] and (bim['FoundationType'] == 3504): - bmt_type = 'spt' # Split-Level Basement - elif bim['FoundationType'] in {3501, 3502, 3503, 3505, 3506, 3507}: - bmt_type = 'bn' # No Basement - elif (not bim['SplitLevel']) and (bim['FoundationType'] == 3504): - bmt_type = 'bw' # Basement + if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504): + bmt_type = 'spt' # Split-Level Basement + elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]: + bmt_type = 'bn' # No Basement + elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504): + bmt_type = 'bw' # Basement else: - bmt_type = 'bw' # Default + bmt_type = 'bw' # Default - if bim['OccupancyClass'] not in {'RES1', 'RES2'}: - if 'RES3' in bim['OccupancyClass']: - fl_config = f"{'fl'}_" f"{'RES3'}" + # Duration + dur = 'short' + + # Occupancy Type + if BIM['OccupancyClass'] == 'RES1': + if BIM['NumberOfStories'] == 1: + if flood_type == 'raz': + OT = 'SF1XA' + elif flood_type == 'cvz': + OT = 'SF1XV' else: - fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}" - elif bim['OccupancyClass'] == 'RES2': - fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}_" f"{flood_type}" - elif bmt_type == 'spt': - fl_config = ( - f"{'fl'}_" - f"{bim['OccupancyClass']}_" - f"{'sl'}_" - f"{'bw'}_" - f"{flood_type}" - ) + if bmt_type == 'nav': + if flood_type == 'raz': + OT = 'SF2XA' + elif flood_type == 'cvz': + OT = 'SF2XV' + elif bmt_type == 'bmt': + if flood_type == 'raz': + OT = 'SF2BA' + elif flood_type == 'cvz': + OT = 'SF2BV' + elif bmt_type == 'spt': + if flood_type == 'raz': + OT = 'SF2SA' + elif flood_type == 'cvz': + OT = 'SF2SV' + elif 'RES3' in 
BIM['OccupancyClass']: + OT = 'APT' else: - st = 's' + str(np.min([bim['NumberOfStories'], 3])) - fl_config = ( - f"{'fl'}_" - f"{bim['OccupancyClass']}_" - f"{st}_" - f"{bmt_type}_" - f"{flood_type}" - ) + ap_OT = { + 'RES2': 'MH', + 'RES4': 'HOT', + 'RES5': 'NURSE', + 'RES6': 'NURSE', + 'COM1': 'RETAL', + 'COM2': 'WHOLE', + 'COM3': 'SERVICE', + 'COM4': 'OFFICE', + 'COM5': 'BANK', + 'COM6': 'HOSP', + 'COM7': 'MED', + 'COM8': 'REC', + 'COM9': 'THEAT', + 'COM10': 'GARAGE', + 'IND1': 'INDH', + 'IND2': 'INDL', + 'IND3': 'CHEM', + 'IND4': 'PROC', + 'IND5': 'CHEM', + 'IND6': 'CONST', + 'AGR1': 'AGRI', + 'REL1': 'RELIG', + 'GOV1': 'CITY', + 'GOV2': 'EMERG', + 'EDU1': 'SCHOOL', + 'EDU2': 'SCHOOL' + } + ap_OT[BIM['OccupancyClass']] + + + if not (BIM['OccupancyClass'] in ['RES1', 'RES2']): + if 'RES3' in BIM['OccupancyClass']: + fl_config = f"{'fl'}_" \ + f"{'RES3'}" + else: + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}" + elif BIM['OccupancyClass'] == 'RES2': + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{flood_type}" + else: + if bmt_type == 'spt': + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{'sl'}_" \ + f"{'bw'}_" \ + f"{flood_type}" + else: + st = 's'+str(np.min([BIM['NumberOfStories'],3])) + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{st}_" \ + f"{bmt_type}_" \ + f"{flood_type}" # extend the BIM dictionary - bim.update( - { - 'FloodType': flood_type, - 'BasementType': bmt_type, - 'PostFIRM': post_firm, - } - ) + BIM.update(dict( + FloodType = flood_type, + BasementType=bmt_type, + PostFIRM=PostFIRM, + )) - return fl_config + return fl_config \ No newline at end of file diff --git a/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py index 7d8864b8f..882d8d933 100644 --- a/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 
Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,10 +45,9 @@ import numpy as np - -def FL_config(bim: dict) -> str: # noqa: C901 +def FL_config(BIM): """ - Rules to identify the flood vulnerability category. + Rules to identify the flood vunerability category Parameters ---------- @@ -57,126 +57,144 @@ def FL_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + year = BIM['YearBuilt'] # just for the sake of brevity # Flood Type - if bim['FloodZone'] == 'AO': - flood_type = 'raz' # Riverline/A-Zone - elif bim['FloodZone'] in {'A', 'AE'} or bim['FloodZone'].startswith('V'): - flood_type = 'cvz' # Costal-Zone + if BIM['FloodZone'] == 'AO': + flood_type = 'raz' # Riverline/A-Zone + elif BIM['FloodZone'] in ['A', 'AE']: + flood_type = 'cvz' # Costal-Zone + elif BIM['FloodZone'].startswith('V'): + flood_type = 'cvz' # Costal-Zone else: - flood_type = 'cvz' # Default + flood_type = 'cvz' # Default - # flake8 - unused variable: `FFE`. 
- # # First Floor Elevation (FFE) - # if flood_type in ['raz', 'caz']: - # FFE = BIM['FirstFloorElevation'] - # else: - # FFE = BIM['FirstFloorElevation'] - 1.0 + # First Floor Elevation (FFE) + if flood_type in ['raz', 'caz']: + FFE = BIM['FirstFloorElevation'] + else: + FFE = BIM['FirstFloorElevation'] - 1.0 # PostFIRM - post_firm = False # Default - city_list = [ - 'Absecon', - 'Atlantic', - 'Brigantine', - 'Buena', - 'Buena Vista', - 'Corbin City', - 'Egg Harbor City', - 'Egg Harbor', - 'Estell Manor', - 'Folsom', - 'Galloway', - 'Hamilton', - 'Hammonton', - 'Linwood', - 'Longport', - 'Margate City', - 'Mullica', - 'Northfield', - 'Pleasantville', - 'Port Republic', - 'Somers Point', - 'Ventnor City', - 'Weymouth', - ] - year_list = [ - 1976, - 1971, - 1971, - 1983, - 1979, - 1981, - 1982, - 1983, - 1978, - 1982, - 1983, - 1977, - 1982, - 1983, - 1974, - 1974, - 1982, - 1979, - 1983, - 1983, - 1982, - 1971, - 1979, - ] - for i in range(22): - post_firm = ( - (bim['City'] == city_list[i]) and (year > year_list[i]) - ) or post_firm + PostFIRM = False # Default + city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista', + 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor', + 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood', + 'Longport', 'Margate City', 'Mullica', 'Northfield', + 'Pleasantville', 'Port Republic', 'Somers Point', + 'Ventnor City', 'Weymouth'] + year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982, + 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983, + 1982, 1971, 1979] + for i in range(0,22): + PostFIRM = (((BIM['City'] == city_list[i]) and (year > year_list[i])) or \ + PostFIRM) # Basement Type - if bim['SplitLevel'] and (bim['FoundationType'] == 3504): - bmt_type = 'spt' # Split-Level Basement - elif bim['FoundationType'] in {3501, 3502, 3503, 3505, 3506, 3507}: - bmt_type = 'bn' # No Basement - elif (not bim['SplitLevel']) and (bim['FoundationType'] == 3504): - bmt_type = 'bw' # 
Basement + if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504): + bmt_type = 'spt' # Split-Level Basement + elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]: + bmt_type = 'bn' # No Basement + elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504): + bmt_type = 'bw' # Basement + else: + bmt_type = 'bw' # Default + + # Duration + dur = 'short' + + # Occupancy Type + if BIM['OccupancyClass'] == 'RES1': + if BIM['NumberOfStories'] == 1: + if flood_type == 'raz': + OT = 'SF1XA' + elif flood_type == 'cvz': + OT = 'SF1XV' + else: + if bmt_type == 'nav': + if flood_type == 'raz': + OT = 'SF2XA' + elif flood_type == 'cvz': + OT = 'SF2XV' + elif bmt_type == 'bmt': + if flood_type == 'raz': + OT = 'SF2BA' + elif flood_type == 'cvz': + OT = 'SF2BV' + elif bmt_type == 'spt': + if flood_type == 'raz': + OT = 'SF2SA' + elif flood_type == 'cvz': + OT = 'SF2SV' + elif 'RES3' in BIM['OccupancyClass']: + OT = 'APT' else: - bmt_type = 'bw' # Default + ap_OT = { + 'RES2': 'MH', + 'RES4': 'HOT', + 'RES5': 'NURSE', + 'RES6': 'NURSE', + 'COM1': 'RETAL', + 'COM2': 'WHOLE', + 'COM3': 'SERVICE', + 'COM4': 'OFFICE', + 'COM5': 'BANK', + 'COM6': 'HOSP', + 'COM7': 'MED', + 'COM8': 'REC', + 'COM9': 'THEAT', + 'COM10': 'GARAGE', + 'IND1': 'INDH', + 'IND2': 'INDL', + 'IND3': 'CHEM', + 'IND4': 'PROC', + 'IND5': 'CHEM', + 'IND6': 'CONST', + 'AGR1': 'AGRI', + 'REL1': 'RELIG', + 'GOV1': 'CITY', + 'GOV2': 'EMERG', + 'EDU1': 'SCHOOL', + 'EDU2': 'SCHOOL' + } + ap_OT[BIM['OccupancyClass']] + - if bim['OccupancyClass'] not in {'RES1', 'RES2'}: - if 'RES3' in bim['OccupancyClass']: - fl_config = f"{'fl'}_" f"{'RES3'}" + if not (BIM['OccupancyClass'] in ['RES1', 'RES2']): + if 'RES3' in BIM['OccupancyClass']: + fl_config = f"{'fl'}_" \ + f"{'RES3'}" else: - fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}" - elif bim['OccupancyClass'] == 'RES2': - fl_config = f"{'fl'}_" f"{bim['OccupancyClass']}_" f"{flood_type}" - elif bmt_type == 'spt': - fl_config = ( - f"{'fl'}_" - 
f"{bim['OccupancyClass']}_" - f"{'sl'}_" - f"{'bw'}_" - f"{flood_type}" - ) + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}" + elif BIM['OccupancyClass'] == 'RES2': + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{flood_type}" else: - st = 's' + str(np.min([bim['NumberOfStories'], 3])) - fl_config = ( - f"{'fl'}_" - f"{bim['OccupancyClass']}_" - f"{st}_" - f"{bmt_type}_" - f"{flood_type}" - ) + if bmt_type == 'spt': + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{'sl'}_" \ + f"{'bw'}_" \ + f"{flood_type}" + else: + st = 's'+str(np.min([BIM['NumberOfStories'],3])) + fl_config = f"{'fl'}_" \ + f"{BIM['OccupancyClass']}_" \ + f"{st}_" \ + f"{bmt_type}_" \ + f"{flood_type}" # extend the BIM dictionary - bim.update( - { - 'FloodType': flood_type, - 'BasementType': bmt_type, - 'PostFIRM': post_firm, - } - ) + BIM.update(dict( + FloodType = flood_type, + BasementType=bmt_type, + PostFIRM=PostFIRM, + )) return fl_config + diff --git a/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py index cc6155740..cfd50c7f2 100644 --- a/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,16 +43,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -from __future__ import annotations - import numpy as np - -def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: C901 +def parse_BIM(BIM_in, location, hazards): """ Parses the information provided in the BIM model. 
- The attributes below list the expected metadata in the BIM file + The atrributes below list the expected metadata in the BIM file Parameters ---------- @@ -85,140 +83,132 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: ------- BIM: dictionary Parsed building characteristics. - - Raises - ------ - KeyError - In case of missing attributes. - """ + # check location - if location not in {'LA', 'NJ'}: - print(f'WARNING: The provided location is not recognized: {location}') # noqa: T201 + if location not in ['LA', 'NJ']: + print(f'WARNING: The provided location is not recognized: {location}') # check hazard for hazard in hazards: - if hazard not in {'wind', 'inundation'}: - print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 + if hazard not in ['wind', 'inundation']: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # initialize the BIM dict - bim = {} + BIM = {} if 'wind' in hazards: # maps roof type to the internal representation - ap_roof_type = { - 'hip': 'hip', + ap_RoofType = { + 'hip' : 'hip', 'hipped': 'hip', - 'Hip': 'hip', + 'Hip' : 'hip', 'gabled': 'gab', - 'gable': 'gab', - 'Gable': 'gab', - 'flat': 'flt', - 'Flat': 'flt', + 'gable' : 'gab', + 'Gable' : 'gab', + 'flat' : 'flt', + 'Flat' : 'flt' } # maps roof system to the internal representation - ap_roof_system = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} - roof_system = bim_in.get('RoofSystem', 'Wood') - - # flake8 - unused variable: `ap_NoUnits`. - # # maps number of units to the internal representation - # ap_NoUnits = { - # 'Single': 'sgl', - # 'Multiple': 'mlt', - # 'Multi': 'mlt', - # 'nav': 'nav', - # } + ap_RoofSystem = { + 'Wood': 'trs', + 'OWSJ': 'ows', + 'N/A': 'trs' + } + roof_system = BIM_in.get('RoofSystem', 'Wood') + + # maps number of units to the internal representation + ap_NoUnits = { + 'Single': 'sgl', + 'Multiple': 'mlt', + 'Multi': 'mlt', + 'nav': 'nav' + } # Average January Temp. 
- ap_ajt = {'Above': 'above', 'Below': 'below'} + ap_ajt = { + 'Above': 'above', + 'Below': 'below' + } # Year built alname_yearbuilt = ['yearBuilt', 'YearBuiltMODIV', 'YearBuiltNJDEP'] - yearbuilt = bim_in.get('YearBuilt') + yearbuilt = BIM_in.get('YearBuilt', None) # if none of the above works, set a default if yearbuilt is None: for alname in alname_yearbuilt: - if alname in bim_in: - yearbuilt = bim_in[alname] + if alname in BIM_in.keys(): + yearbuilt = BIM_in[alname] break if yearbuilt is None: yearbuilt = 1985 # Number of Stories - alname_nstories = [ - 'stories', - 'NumberofStories0', - 'NumberofStories', - 'NumberofStories1', - ] + alname_nstories = ['stories', 'NumberofStories0', 'NumberofStories', 'NumberofStories1'] - nstories = bim_in.get('NumberOfStories') + nstories = BIM_in.get('NumberOfStories', None) if nstories is None: for alname in alname_nstories: - if alname in bim_in: - nstories = bim_in[alname] + if alname in BIM_in.keys(): + nstories = BIM_in[alname] break if nstories is None: - msg = 'NumberOfStories attribute missing, cannot autopopulate' - raise KeyError(msg) + raise KeyError("NumberOfStories attribute missing, cannot autopopulate") # Plan Area alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0'] - area = bim_in.get('PlanArea') + area = BIM_in.get('PlanArea', None) if area is None: for alname in alname_area: - if alname in bim_in: - area = bim_in[alname] + if alname in BIM_in.keys(): + area = BIM_in[alname] break if area is None: - msg = 'PlanArea attribute missing, cannot autopopulate' - raise KeyError(msg) + raise KeyError("PlanArea attribute missing, cannot autopopulate") # Design Wind Speed alname_dws = ['DWSII', 'DesignWindSpeed'] - dws = bim_in.get('DesignWindSpeed') + dws = BIM_in.get('DesignWindSpeed', None) if dws is None: for alname in alname_dws: - if alname in bim_in: - dws = bim_in[alname] + if alname in BIM_in.keys(): + dws = BIM_in[alname] break if dws is None: - msg = 'DesignWindSpeed attribute missing, cannot 
autopopulate' - raise KeyError(msg) + raise KeyError("DesignWindSpeed attribute missing, cannot autopopulate") # occupancy type alname_occupancy = ['occupancy', 'OccupancyClass'] - oc = bim_in.get('OccupancyClass') + oc = BIM_in.get('OccupancyClass', None) if oc is None: for alname in alname_occupancy: - if alname in bim_in: - oc = bim_in[alname] + if alname in BIM_in.keys(): + oc = BIM_in[alname] break if oc is None: - msg = 'OccupancyClass attribute missing, cannot autopopulate' - raise KeyError(msg) + raise KeyError("OccupancyClass attribute missing, cannot autopopulate") # if getting RES3 then converting it to default RES3A if oc == 'RES3': oc = 'RES3A' # maps for BuildingType - ap_building_type_nj = { + ap_BuildingType_NJ = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 3001: 'Wood', @@ -229,117 +219,111 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: } if location == 'NJ': # NJDEP code for flood zone needs to be converted - buildingtype = ap_building_type_nj[bim_in['BuildingType']] - + buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']] + elif location == 'LA': # standard input should provide the building type as a string - buildingtype = bim_in['BuildingType'] - - # maps for design level (Marginal Engineered is mapped to - # Engineered as defauplt) - ap_design_level = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} - design_level = bim_in.get('DesignLevel', 'E') + buildingtype = BIM_in['BuildingType'] + + # maps for design level (Marginal Engineered is mapped to Engineered as default) + ap_DesignLevel = { + 'E': 'E', + 'NE': 'NE', + 'PE': 'PE', + 'ME': 'E' + } + design_level = BIM_in.get('DesignLevel','E') # flood zone - flood_zone = bim_in.get('FloodZone', 'X') + flood_zone = BIM_in.get('FloodZone', 'X') # add the parsed data to the BIM dict - bim.update( - { - 'OccupancyClass': str(oc), - 'BuildingType': buildingtype, - 'YearBuilt': int(yearbuilt), - 
'NumberOfStories': int(nstories), - 'PlanArea': float(area), - 'V_ult': float(dws), - 'AvgJanTemp': ap_ajt[bim_in.get('AvgJanTemp', 'Below')], - 'RoofShape': ap_roof_type[bim_in['RoofShape']], - 'RoofSlope': float(bim_in.get('RoofSlope', 0.25)), # default 0.25 - 'SheathingThickness': float( - bim_in.get('SheathingThick', 1.0) - ), # default 1.0 - 'RoofSystem': str( - ap_roof_system[roof_system] - ), # only valid for masonry structures - 'Garage': float(bim_in.get('Garage', -1.0)), - 'LULC': bim_in.get('LULC', -1), - 'MeanRoofHt': float(bim_in.get('MeanRoofHt', 15.0)), # default 15 - 'WindowArea': float(bim_in.get('WindowArea', 0.20)), - 'WindZone': str(bim_in.get('WindZone', 'I')), - 'FloodZone': str(flood_zone), - } - ) + BIM.update(dict( + OccupancyClass=str(oc), + BuildingType=buildingtype, + YearBuilt=int(yearbuilt), + NumberOfStories=int(nstories), + PlanArea=float(area), + V_ult=float(dws), + AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp','Below')], + RoofShape=ap_RoofType[BIM_in['RoofShape']], + RoofSlope=float(BIM_in.get('RoofSlope',0.25)), # default 0.25 + SheathingThickness=float(BIM_in.get('SheathingThick',1.0)), # default 1.0 + RoofSystem=str(ap_RoofSystem[roof_system]), # only valid for masonry structures + Garage=float(BIM_in.get('Garage',-1.0)), + LULC=BIM_in.get('LULC',-1), + MeanRoofHt=float(BIM_in.get('MeanRoofHt',15.0)), # default 15 + WindowArea=float(BIM_in.get('WindowArea',0.20)), + WindZone=str(BIM_in.get('WindZone', 'I')), + FloodZone =str(flood_zone) + )) if 'inundation' in hazards: + # maps for split level - ap_split_level = {'NO': 0, 'YES': 1} + ap_SplitLevel = { + 'NO': 0, + 'YES': 1 + } # foundation type - foundation = bim_in.get('FoundationType', 3501) + foundation = BIM_in.get('FoundationType',3501) # number of units - nunits = bim_in.get('NoUnits', 1) - - # flake8 - unused variable: `ap_FloodZone`. 
- # # maps for flood zone - # ap_FloodZone = { - # # Coastal areas with a 1% or greater chance of flooding and an - # # additional hazard associated with storm waves. - # 6101: 'VE', - # 6102: 'VE', - # 6103: 'AE', - # 6104: 'AE', - # 6105: 'AO', - # 6106: 'AE', - # 6107: 'AH', - # 6108: 'AO', - # 6109: 'A', - # 6110: 'X', - # 6111: 'X', - # 6112: 'X', - # 6113: 'OW', - # 6114: 'D', - # 6115: 'NA', - # 6119: 'NA', - # } - - # flake8 - unused variable: `floodzone_fema`. - # if isinstance(BIM_in['FloodZone'], int): - # # NJDEP code for flood zone (conversion to the FEMA designations) - # floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] - # else: - # # standard input should follow the FEMA flood zone designations - # floodzone_fema = BIM_in['FloodZone'] + nunits = BIM_in.get('NoUnits',1) + + # maps for flood zone + ap_FloodZone = { + # Coastal areas with a 1% or greater chance of flooding and an + # additional hazard associated with storm waves. + 6101: 'VE', + 6102: 'VE', + 6103: 'AE', + 6104: 'AE', + 6105: 'AO', + 6106: 'AE', + 6107: 'AH', + 6108: 'AO', + 6109: 'A', + 6110: 'X', + 6111: 'X', + 6112: 'X', + 6113: 'OW', + 6114: 'D', + 6115: 'NA', + 6119: 'NA' + } + if type(BIM_in['FloodZone']) == int: + # NJDEP code for flood zone (conversion to the FEMA designations) + floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] + else: + # standard input should follow the FEMA flood zone designations + floodzone_fema = BIM_in['FloodZone'] # add the parsed data to the BIM dict - bim.update( - { - 'DesignLevel': str( - ap_design_level[design_level] - ), # default engineered - 'NumberOfUnits': int(nunits), - 'FirstFloorElevation': float(bim_in.get('FirstFloorHt1', 10.0)), - 'SplitLevel': bool( - ap_split_level[bim_in.get('SplitLevel', 'NO')] - ), # dfault: no - 'FoundationType': int(foundation), # default: pile - 'City': bim_in.get('City', 'NA'), - } - ) + BIM.update(dict( + DesignLevel=str(ap_DesignLevel[design_level]), # default engineered + NumberOfUnits=int(nunits), + 
FirstFloorElevation=float(BIM_in.get('FirstFloorHt1',10.0)), + SplitLevel=bool(ap_SplitLevel[BIM_in.get('SplitLevel','NO')]), # dfault: no + FoundationType=int(foundation), # default: pile + City=BIM_in.get('City','NA') + )) # add inferred, generic meta-variables if 'wind' in hazards: + # Hurricane-Prone Region (HRP) # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is # greater than a pre-defined limit. - if bim['YearBuilt'] >= 2016: + if BIM['YearBuilt'] >= 2016: # The limit is 115 mph in IRC 2015 - hpr = bim['V_ult'] > 115.0 + HPR = BIM['V_ult'] > 115.0 else: # The limit is 90 mph in IRC 2009 and earlier versions - hpr = bim['V_ult'] > 90.0 + HPR = BIM['V_ult'] > 90.0 # Wind Borne Debris # Areas within hurricane-prone regions are affected by debris if one of @@ -349,30 +333,25 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: # (2) In areas where the ultimate design wind speed is greater than # general_lim # The flood_lim and general_lim limits depend on the year of construction - if bim['YearBuilt'] >= 2016: + if BIM['YearBuilt'] >= 2016: # In IRC 2015: - flood_lim = 130.0 # mph - general_lim = 140.0 # mph + flood_lim = 130.0 # mph + general_lim = 140.0 # mph else: # In IRC 2009 and earlier versions - flood_lim = 110.0 # mph - general_lim = 120.0 # mph + flood_lim = 110.0 # mph + general_lim = 120.0 # mph # Areas within hurricane-prone regions located in accordance with # one of the following: # (1) Within 1 mile (1.61 km) of the coastal mean high water line # where the ultimate design wind speed is 130 mph (58m/s) or greater. # (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s) # or greater. 
(Definitions: Chapter 2, 2015 NJ Residential Code) - if not hpr: - wbd = False + if not HPR: + WBD = False else: - wbd = ( - ( - bim['FloodZone'].startswith('A') - or bim['FloodZone'].startswith('V') - ) - and bim['V_ult'] >= flood_lim - ) or (bim['V_ult'] >= general_lim) + WBD = (((BIM['FloodZone'].startswith('A') or BIM['FloodZone'].startswith('V')) and + BIM['V_ult'] >= flood_lim) or (BIM['V_ult'] >= general_lim)) # Terrain # open (0.03) = 3 @@ -380,92 +359,68 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: # suburban (0.35) = 35 # light trees (0.70) = 70 # trees (1.00) = 100 - # Mapped to Land Use Categories in NJ (see - # https://www.state.nj.us/dep/gis/ - # digidownload/metadata/lulc02/anderson2002.html) by T. Wu - # group (see internal report on roughness calculations, Table - # 4). These are mapped to Hazus definitions as follows: Open - # Water (5400s) with zo=0.01 and barren land (7600) with - # zo=0.04 assume Open Open Space Developed, Low Intensity - # Developed, Medium Intensity Developed (1110-1140) assumed - # zo=0.35-0.4 assume Suburban High Intensity Developed (1600) - # with zo=0.6 assume Lt. Tree Forests of all classes - # (4100-4300) assumed zo=0.6 assume Lt. Tree Shrub (4400) with - # zo=0.06 assume Open Grasslands, pastures and agricultural - # areas (2000 series) with zo=0.1-0.15 assume Lt. Suburban - # Woody Wetlands (6250) with zo=0.3 assume suburban Emergent - # Herbaceous Wetlands (6240) with zo=0.03 assume Open - # Note: HAZUS category of trees (1.00) does not apply to any - # LU/LC in NJ - terrain = 15 # Default in Reorganized Rulesets - WIND - if location == 'NJ': - if bim['FloodZone'].startswith('V') or bim['FloodZone'] in { - 'A', - 'AE', - 'A1-30', - 'AR', - 'A99', - }: + # Mapped to Land Use Categories in NJ (see https://www.state.nj.us/dep/gis/ + # digidownload/metadata/lulc02/anderson2002.html) by T. Wu group + # (see internal report on roughness calculations, Table 4). 
+ # These are mapped to Hazus defintions as follows: + # Open Water (5400s) with zo=0.01 and barren land (7600) with zo=0.04 assume Open + # Open Space Developed, Low Intensity Developed, Medium Intensity Developed + # (1110-1140) assumed zo=0.35-0.4 assume Suburban + # High Intensity Developed (1600) with zo=0.6 assume Lt. Tree + # Forests of all classes (4100-4300) assumed zo=0.6 assume Lt. Tree + # Shrub (4400) with zo=0.06 assume Open + # Grasslands, pastures and agricultural areas (2000 series) with + # zo=0.1-0.15 assume Lt. Suburban + # Woody Wetlands (6250) with zo=0.3 assume suburban + # Emergent Herbaceous Wetlands (6240) with zo=0.03 assume Open + # Note: HAZUS category of trees (1.00) does not apply to any LU/LC in NJ + terrain = 15 # Default in Reorganized Rulesets - WIND + if location == "NJ": + if (BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']): terrain = 3 - elif ((bim['LULC'] >= 5000) and (bim['LULC'] <= 5999)) or ( - ((bim['LULC'] == 4400) or (bim['LULC'] == 6240)) - or (bim['LULC'] == 7600) - ): - terrain = 3 # Open - elif (bim['LULC'] >= 2000) and (bim['LULC'] <= 2999): - terrain = 15 # Light suburban - elif ((bim['LULC'] >= 1110) and (bim['LULC'] <= 1140)) or ( - (bim['LULC'] >= 6250) and (bim['LULC'] <= 6252) - ): - terrain = 35 # Suburban - elif ((bim['LULC'] >= 4100) and (bim['LULC'] <= 4300)) or ( - bim['LULC'] == 1600 - ): - terrain = 70 # light trees - elif location == 'LA': - if bim['FloodZone'].startswith('V') or bim['FloodZone'] in { - 'A', - 'AE', - 'A1-30', - 'AR', - 'A99', - }: + elif ((BIM['LULC'] >= 5000) and (BIM['LULC'] <= 5999)): + terrain = 3 # Open + elif ((BIM['LULC'] == 4400) or (BIM['LULC'] == 6240)) or (BIM['LULC'] == 7600): + terrain = 3 # Open + elif ((BIM['LULC'] >= 2000) and (BIM['LULC'] <= 2999)): + terrain = 15 # Light suburban + elif ((BIM['LULC'] >= 1110) and (BIM['LULC'] <= 1140)) or ((BIM['LULC'] >= 6250) and (BIM['LULC'] <= 6252)): + terrain = 35 # Suburban + elif 
((BIM['LULC'] >= 4100) and (BIM['LULC'] <= 4300)) or (BIM['LULC'] == 1600): + terrain = 70 # light trees + elif location == "LA": + if (BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']): terrain = 3 - elif ((bim['LULC'] >= 50) and (bim['LULC'] <= 59)) or ( - ((bim['LULC'] == 44) or (bim['LULC'] == 62)) or (bim['LULC'] == 76) - ): - terrain = 3 # Open - elif (bim['LULC'] >= 20) and (bim['LULC'] <= 29): - terrain = 15 # Light suburban - elif (bim['LULC'] == 11) or (bim['LULC'] == 61): - terrain = 35 # Suburban - elif ((bim['LULC'] >= 41) and (bim['LULC'] <= 43)) or ( - bim['LULC'] in {16, 17} - ): - terrain = 70 # light trees - - bim.update( - { - # Nominal Design Wind Speed - # Former term was “Basic Wind Speed”; it is now the “Nominal Design - # Wind Speed (V_asd). Unit: mph." - 'V_asd': np.sqrt(0.6 * bim['V_ult']), - 'HazardProneRegion': hpr, - 'WindBorneDebris': wbd, - 'TerrainRoughness': terrain, - } - ) + elif ((BIM['LULC'] >= 50) and (BIM['LULC'] <= 59)): + terrain = 3 # Open + elif ((BIM['LULC'] == 44) or (BIM['LULC'] == 62)) or (BIM['LULC'] == 76): + terrain = 3 # Open + elif ((BIM['LULC'] >= 20) and (BIM['LULC'] <= 29)): + terrain = 15 # Light suburban + elif (BIM['LULC'] == 11) or (BIM['LULC'] == 61): + terrain = 35 # Suburban + elif ((BIM['LULC'] >= 41) and (BIM['LULC'] <= 43)) or (BIM['LULC'] in [16, 17]): + terrain = 70 # light trees + + BIM.update(dict( + # Nominal Design Wind Speed + # Former term was “Basic Wind Speed”; it is now the “Nominal Design + # Wind Speed (V_asd). Unit: mph." + V_asd = np.sqrt(0.6 * BIM['V_ult']), + + HazardProneRegion=HPR, + WindBorneDebris=WBD, + TerrainRoughness=terrain, + )) if 'inundation' in hazards: - bim.update( - { - # Flood Risk - # Properties in the High Water Zone (within 1 mile of - # the coast) are at risk of flooding and other - # wind-borne debris action. - # TODO: need high water zone for this and move it to inputs! 
# noqa: TD002 - 'FloodRisk': True, - } - ) - - return bim + + BIM.update(dict( + # Flood Risk + # Properties in the High Water Zone (within 1 mile of the coast) are at + # risk of flooding and other wind-borne debris action. + FloodRisk=True, # TODO: need high water zone for this and move it to inputs! + )) + + return BIM + diff --git a/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py index 5b52b7a45..c034a6b4f 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,11 +44,12 @@ # Tracy Kijewski-Correa import random +import numpy as np +import datetime - -def CECB_config(bim: dict) -> str: # noqa: C901 +def CECB_config(BIM): """ - Rules to identify a HAZUS CECB configuration based on BIM data. + Rules to identify a HAZUS CECB configuration based on BIM data Parameters ---------- @@ -57,25 +59,26 @@ def CECB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur - elif year >= 1975: - roof_cover = 'spm' else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -85,51 +88,52 @@ def CECB_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. 
- elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'C.ECB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'C.ECB.M' else: bldg_tag = 'C.ECB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'Shutters': shutters, - 'WindowAreaRatio': wwr, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + Shutters = shutters, + WindowAreaRatio = WWR, + WindDebrisClass = WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{wwr}." 
- f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py index 0b75ebad3..41f8faab0 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,11 +44,12 @@ # Tracy Kijewski-Correa import random +import numpy as np +import datetime - -def CERB_config(bim: dict) -> str: # noqa: C901 +def CERB_config(BIM): """ - Rules to identify a HAZUS CERB configuration based on BIM data. + Rules to identify a HAZUS CERB configuration based on BIM data Parameters ---------- @@ -57,25 +59,26 @@ def CERB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur - elif year >= 1975: - roof_cover = 'spm' else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -85,51 +88,51 @@ def CERB_config(bim: dict) -> str: # noqa: C901 # facilities. 
In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.45 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False # Wind Debris (widd in HAZUS) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'C.ERB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'C.ERB.M' else: bldg_tag = 'C.ERB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'Shutters': shutters, - 'WindowAreaRatio': wwr, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + Shutters = shutters, + WindowAreaRatio = WWR, + WindDebrisClass = WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{wwr}." 
- f"{int(bim['TerrainRoughness'])}" - ) + return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py index bd81df21b..1762eb5ce 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,14 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import numpy as np +import datetime -def HUEFFS_config(bim: dict) -> str: +def HUEFFS_config(BIM): """ - Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data. + Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data Parameters ---------- @@ -58,62 +60,63 @@ def HUEFFS_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - roof_cover = 'spm' if year >= 1975 else 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # Wind debris - widd = 'A' + WIDD = 'A' # Roof deck age - if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): - dq = 'god' # new or average + if year >= (datetime.datetime.now().year - 50): + DQ = 'god' # new or average else: - dq = 'por' # old + DQ = 'por' # old # Metal-RDA if year > 2000: - if bim['V_ult'] <= 142: - mrda = 'std' # standard + if BIM['V_ult'] <= 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior else: - mrda = 'std' # standard + MRDA = 'std' # standard # Shutters - shutters = int(bim['WBD']) + shutters = int(BIM['WBD']) # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'RoofDeckAge': dq, - 'WindDebrisClass': widd, - 'Shutters': shutters, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + RoofDeckAge=DQ, + WindDebrisClass = WIDD, + Shutters = shutters + )) bldg_tag = 'HUEF.FS' - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{shutters}." - f"{widd}." - f"{dq}." - f"{mrda}." - f"{int(bim['TerrainRoughness'])}" - ) - - -def HUEFSS_config(bim: dict) -> str: + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{shutters}." \ + f"{WIDD}." \ + f"{DQ}." \ + f"{MRDA}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config + +def HUEFSS_config(BIM): """ - Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data. + Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data Parameters ---------- @@ -123,11 +126,11 @@ def HUEFSS_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. 
- + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -137,52 +140,50 @@ def HUEFSS_config(bim: dict) -> str: roof_cover = 'bur' # Wind debris - widd = 'A' + WIDD = 'A' # Roof deck age - if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): - dq = 'god' # new or average + if year >= (datetime.datetime.now().year - 50): + DQ = 'god' # new or average else: - dq = 'por' # old + DQ = 'por' # old # Metal-RDA if year > 2000: - if bim['V_ult'] <= 142: - mrda = 'std' # standard + if BIM['V_ult'] <= 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior else: - mrda = 'std' # standard + MRDA = 'std' # standard # Shutters - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'RoofDeckAge': dq, - 'WindDebrisClass': widd, - 'Shutters': shutters, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + RoofDeckAge=DQ, + WindDebrisClass = WIDD, + Shutters=shutters + )) bldg_tag = 'HUEF.S.S' - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{dq}." - f"{mrda}." - f"{int(bim['TerrainRoughness'])}" - ) - - -def HUEFH_config(bim: dict) -> str: + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{DQ}." \ + f"{MRDA}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config + + +def HUEFH_config(BIM): """ - Rules to identify a HAZUS HUEFH configuration based on BIM data. + Rules to identify a HAZUS HUEFH configuration based on BIM data Parameters ---------- @@ -192,11 +193,11 @@ def HUEFH_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. 
- + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -206,50 +207,47 @@ def HUEFH_config(bim: dict) -> str: roof_cover = 'bur' # Wind debris - widd = 'A' + WIDD = 'A' # Shutters - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # Metal-RDA if year > 2000: - if bim['V_ult'] <= 142: - mrda = 'std' # standard + if BIM['V_ult'] <= 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior else: - mrda = 'std' # standard + MRDA = 'std' # standard - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <=2: bldg_tag = 'HUEF.H.S' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'HUEF.H.M' else: bldg_tag = 'HUEF.H.L' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'WindDebrisClass': widd, - 'Shutters': shutters, - } - ) - - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{widd}." - f"{mrda}." - f"{int(shutters)}." - f"{int(bim['TerrainRoughness'])}" - ) - - -def HUEFS_config(bim: dict) -> str: + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + WindDebrisClass = WIDD, + Shutters=shutters + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{WIDD}." \ + f"{MRDA}." \ + f"{int(shutters)}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config + +def HUEFS_config(BIM): """ - Rules to identify a HAZUS HUEFS configuration based on BIM data. + Rules to identify a HAZUS HUEFS configuration based on BIM data Parameters ---------- @@ -259,11 +257,11 @@ def HUEFS_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover if year >= 1975: @@ -273,43 +271,46 @@ def HUEFS_config(bim: dict) -> str: roof_cover = 'bur' # Wind debris - widd = 'C' + WIDD = 'C' # Shutters if year > 2000: - shutters = bim['WindBorneDebris'] - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 + shutters = BIM['WindBorneDebris'] else: - shutters = False + # year <= 2000 + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Metal-RDA if year > 2000: - if bim['V_ult'] <= 142: - mrda = 'std' # standard + if BIM['V_ult'] <= 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior else: - mrda = 'std' # standard + MRDA = 'std' # standard - bldg_tag = 'HUEF.S.M' if bim['NumberOfStories'] <= 2 else 'HUEF.S.L' + if BIM['NumberOfStories'] <=2: + bldg_tag = 'HUEF.S.M' + else: + bldg_tag = 'HUEF.S.L' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'WindDebrisClass': widd, - 'Shutters': shutters, - } - ) - - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"null." - f"{mrda}." - f"{int(bim['TerrainRoughness'])}" - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + WindDebrisClass = WIDD, + Shutters=shutters + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"null." \ + f"{MRDA}." 
\ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config \ No newline at end of file diff --git a/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py index 42db6dc07..137844f7b 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,10 +45,9 @@ import random - -def MECB_config(bim: dict) -> str: # noqa: C901 +def MECB_config(BIM): """ - Rules to identify a HAZUS MECB configuration based on BIM data. + Rules to identify a HAZUS MECB configuration based on BIM data Parameters ---------- @@ -57,84 +57,85 @@ def MECB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' - # no info, using the default supported by HAZUS - elif year >= 1975: - roof_cover = 'spm' + # no info, using the default supoorted by HAZUS else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 + shutters = BIM['WindBorneDebris'] else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'M.ECB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'M.ECB.M' else: bldg_tag = 'M.ECB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - 'WindowAreaRatio': wwr, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + Shutters = shutters, + WindowAreaRatio = WWR, + WindDebrisClass = WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{MRDA}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{mrda}." - f"{wwr}." - f"{int(bim['TerrainRoughness'])}" - ) + return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py index 4158cd74e..2299b8dbb 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,11 +44,12 @@ # Tracy Kijewski-Correa import random +import numpy as np +import datetime - -def MERB_config(bim: dict) -> str: # noqa: C901 +def MERB_config(BIM): """ - Rules to identify a HAZUS MERB configuration based on BIM data. 
+ Rules to identify a HAZUS MERB configuration based on BIM data Parameters ---------- @@ -57,84 +59,85 @@ def MERB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' - # no info, using the default supported by HAZUS - elif year >= 1975: - roof_cover = 'spm' + # no info, using the default supoorted by HAZUS else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] - elif bim['WindBorneDebris']: - shutters = random.random() < 0.45 + shutters = BIM['WindBorneDebris'] else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. 
Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. - if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'M.ERB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'M.ERB.M' else: bldg_tag = 'M.ERB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - 'WindowAreaRatio': wwr, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentM = MRDA, + Shutters = shutters, + WindowAreaRatio = WWR, + WindDebrisClass = WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{MRDA}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{mrda}." - f"{wwr}." 
- f"{int(bim['TerrainRoughness'])}" - ) + return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py index 37480fc17..db6ebe8a3 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -43,11 +44,12 @@ # Tracy Kijewski-Correa import random +import numpy as np +import datetime - -def MH_config(bim: dict) -> str: +def MH_config(BIM): """ - Rules to identify a HAZUS WSF configuration based on BIM data. + Rules to identify a HAZUS WSF configuration based on BIM data Parameters ---------- @@ -57,44 +59,55 @@ def MH_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity if year <= 1976: # MHPHUD bldg_tag = 'MH.PHUD' - shutters = random.random() < 0.45 if bim['WindBorneDebris'] else False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False # TieDowns - tie_downs = random.random() < 0.45 + TD = random.random() < 0.45 elif year <= 1994: # MH76HUD bldg_tag = 'MH.76HUD' - shutters = random.random() < 0.45 if bim['WindBorneDebris'] else False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False # TieDowns - tie_downs = random.random() < 0.45 + TD = random.random() < 0.45 else: # MH94HUD I, II, III - shutters = bim['V_ult'] >= 100.0 + if BIM['V_ult'] >= 100.0: + shutters = True + else: + shutters = False # TieDowns - tie_downs = bim['V_ult'] >= 70.0 + if BIM['V_ult'] >= 70.0: + TD = True + else: + TD = False - bldg_tag = 'MH.94HUD' + bim['WindZone'] + bldg_tag = 'MH.94HUD' + BIM['WindZone'] # extend the BIM dictionary - bim.update( - { - 'TieDowns': tie_downs, - 'Shutters': shutters, - } - ) + BIM.update(dict( + TieDowns = TD, + Shutters = shutters, + )) + + bldg_config = f"{bldg_tag}." \ + f"{int(shutters)}." \ + f"{int(TD)}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"{bldg_tag}." - f"{int(shutters)}." - f"{int(tie_downs)}." 
- f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py index 3a46c7199..09b833976 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,12 +43,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa +import random +import numpy as np import datetime - -def MLRI_config(bim: dict) -> str: +def MLRI_config(BIM): """ - Rules to identify a HAZUS MLRI configuration based on BIM data. + Rules to identify a HAZUS MLRI configuration based on BIM data Parameters ---------- @@ -57,14 +59,14 @@ def MLRI_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # MR - mr = True + MR = True # Shutters shutters = False @@ -74,50 +76,46 @@ def MLRI_config(bim: dict) -> str: # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'null' - roof_quality = 'god' # default supported by HAZUS - elif year >= 1975: - roof_cover = 'spm' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 35 - ): - roof_quality = 'god' - else: - roof_quality = 'por' + roof_quality = 'god' # default supported by HAZUS else: - # year < 1975 - roof_cover = 'bur' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 30 - ): - roof_quality = 'god' + if year >= 1975: + roof_cover = 'spm' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): + roof_quality = 'god' + else: + roof_quality = 'por' else: - roof_quality = 'por' - + # year < 1975 + roof_cover = 'bur' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): + roof_quality = 'god' + else: + roof_quality = 'por' + # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofQuality': roof_quality, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - 'MasonryReinforcing': mr, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofQuality = roof_quality, + RoofDeckAttachmentM = MRDA, + Shutters = shutters, + MasonryReinforcing = MR, + )) + + bldg_config = f"M.LRI." \ + f"{int(shutters)}." \ + f"{int(MR)}." \ + f"{roof_quality}." \ + f"{MRDA}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"M.LRI." - f"{int(shutters)}." - f"{int(mr)}." - f"{roof_quality}." - f"{mrda}." 
- f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py index 8354e8b30..c63f39313 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import numpy as np +import datetime - -def MLRM_config(bim: dict) -> str: # noqa: C901 +def MLRM_config(BIM): """ - Rules to identify a HAZUS MLRM configuration based on BIM data. + Rules to identify a HAZUS MLRM configuration based on BIM data Parameters ---------- @@ -58,17 +59,17 @@ def MLRM_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Note the only roof option for commercial masonry in NJ appraisers manual # is OSWJ, so this suggests they do not even see alternate roof system # ref: Custom Inventory google spreadsheet H-37 10/01/20 # This could be commented for other regions if detailed data are available - bim['RoofSystem'] = 'ows' + BIM['RoofSystem'] = 'ows' # Roof cover # Roof cover does not apply to gable and hip roofs @@ -80,138 +81,135 @@ def MLRM_config(bim: dict) -> str: # noqa: C901 # Shutters # IRC 2000-2015: - # R301.2.1.2 in NJ IRC 2015 says protection of openings required - # for buildings located in WindBorneDebris regions, mentions - # impact-rated protection for glazing, impact-resistance for - # garage door glazed openings, and finally states that wood - # structural panels with a thickness > 7/16" and a span <8' can be - # used, as long as they are precut, attached to the framing - # surrounding the opening, and the attachments are resistant to - # corrosion and are able to resist component and cladding loads; + # R301.2.1.2 in NJ IRC 2015 says protection of openings required for + # buildings located in WindBorneDebris regions, mentions impact-rated protection for + # glazing, impact-resistance for garage door glazed openings, and finally + # states that wood structural panels with a thickness > 7/16" and a + # span <8' can be used, as long as they are precut, attached to the framing + # surrounding the opening, and the attachments are resistant to corrosion + # and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
- shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # Masonry Reinforcing (MR) - # R606.6.4.1.2 Metal Reinforcement states that walls other than - # interior non-load-bearing walls shall be anchored at vertical - # intervals of not more than 8 inches with joint reinforcement of - # not less than 9-gage. Therefore this ruleset assumes that all - # exterior or load-bearing masonry walls will have - # reinforcement. Since our considerations deal with wind speed, I - # made the assumption that only exterior walls are being taken + # R606.6.4.1.2 Metal Reinforcement states that walls other than interior + # non-load-bearing walls shall be anchored at vertical intervals of not + # more than 8 inches with joint reinforcement of not less than 9 gage. + # Therefore this ruleset assumes that all exterior or load-bearing masonry + # walls will have reinforcement. Since our considerations deal with wind + # speed, I made the assumption that only exterior walls are being taken # into consideration. - mr = True + MR = True # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm - if bim['RoofSystem'] == 'ows': + if BIM['RoofSystem'] == 'ows': # RDA - rda = 'null' # Doesn't apply to OWSJ + RDA = 'null' # Doesn't apply to OWSJ # Roof deck age (DQ) # Average lifespan of a steel joist roof is roughly 50 years according # to the source below. Therefore, if constructed 50 years before the # current year, the roof deck should be considered old. 
# https://www.metalroofing.systems/metal-roofing-pros-cons/ - if year >= (datetime.datetime.now(tz=datetime.timezone.utc).year - 50): - dq = 'god' # new or average + if year >= (datetime.datetime.now().year - 50): + DQ = 'god' # new or average else: - dq = 'por' # old + DQ = 'por' # old # RWC - rwc = 'null' # Doesn't apply to OWSJ + RWC = 'null' # Doesn't apply to OWSJ # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. - if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior - elif bim['RoofSystem'] == 'trs': + elif BIM['RoofSystem'] == 'trs': # This clause should not be activated for NJ # RDA - if bim['TerrainRoughness'] >= 35: # suburban or light trees - if bim['V_ult'] > 130.0: - rda = '8s' # 8d @ 6"/6" 'D' + if BIM['TerrainRoughness'] >= 35: # suburban or light trees + if BIM['V_ult'] > 130.0: + RDA = '8s' # 8d @ 6"/6" 'D' else: - rda = '8d' # 8d @ 6"/12" 'B' - elif bim['V_ult'] > 110.0: - rda = '8s' # 8d @ 6"/6" 'D' - else: - rda = '8d' # 8d @ 6"/12" 'B' + RDA = '8d' # 8d @ 6"/12" 'B' + else: # light suburban or open + if BIM['V_ult'] > 110.0: + RDA = '8s' # 8d @ 6"/6" 'D' + else: + RDA = '8d' # 8d @ 6"/12" 'B' # Metal RDA - mrda = 'null' # Doesn't apply to Wood Truss + MRDA = 'null' # Doesn't apply to Wood Truss # Roof deck agea (DQ) - dq = 'null' # Doesn't apply to Wood Truss + DQ = 'null' # Doesn't apply to Wood Truss # RWC - if bim['V_ult'] > 110: - rwc = 'strap' # Strap + if BIM['V_ult'] > 110: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # 
Toe-nail # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 + shutters = BIM['WindBorneDebris'] else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False - if bim['MeanRoofHt'] < 15.0: + if BIM['MeanRoofHt'] < 15.0: # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentW': rda, - 'RoofDeckAttachmentM': mrda, - 'RoofDeckAge': dq, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'MasonryReinforcing': mr, - 'WindowAreaRatio': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentW = RDA, + RoofDeckAttachmentM = MRDA, + RoofDeckAge = DQ, + RoofToWallConnection = RWC, + Shutters = shutters, + MasonryReinforcing = MR, + WindowAreaRatio = WIDD + )) # if it's MLRM1, configure outputs - bldg_config = ( - f"M.LRM.1." - f"{roof_cover}." - f"{int(shutters)}." - f"{int(mr)}." - f"{widd}." - f"{bim['RoofSystem']}." - f"{rda}." - f"{rwc}." - f"{dq}." - f"{mrda}." - f"{int(bim['TerrainRoughness'])}" - ) + bldg_config = f"M.LRM.1." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{int(MR)}." \ + f"{WIDD}." \ + f"{BIM['RoofSystem']}." \ + f"{RDA}." \ + f"{RWC}." \ + f"{DQ}." \ + f"{MRDA}." 
\ + f"{int(BIM['TerrainRoughness'])}" else: unit_tag = 'null' # MLRM2 needs more rulesets - if bim['RoofSystem'] == 'trs': - joist_spacing: int | str = 'null' - elif bim['RoofSystem'] == 'ows': - if bim['NumberOfUnits'] == 1: + if BIM['RoofSystem'] == 'trs': + joist_spacing = 'null' + elif BIM['RoofSystem'] == 'ows': + if BIM['NumberOfUnits'] == 1: joist_spacing = 'null' unit_tag = 'sgl' else: @@ -219,34 +217,30 @@ def MLRM_config(bim: dict) -> str: # noqa: C901 unit_tag = 'mlt' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'RoofDeckAttachmentW': rda, - 'RoofDeckAttachmentM': mrda, - 'RoofDeckAge': dq, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'MasonryReinforcing': mr, - 'WindDebrisClass': widd, - 'UnitType': unit_tag, - } - ) - - bldg_config = ( - f"M.LRM.2." - f"{roof_cover}." - f"{int(shutters)}." - f"{int(mr)}." - f"{widd}." - f"{bim['RoofSystem']}." - f"{rda}." - f"{rwc}." - f"{dq}." - f"{mrda}." - f"{unit_tag}." - f"{joist_spacing}." - f"{int(bim['TerrainRoughness'])}" - ) - - return bldg_config + BIM.update(dict( + RoofCover = roof_cover, + RoofDeckAttachmentW = RDA, + RoofDeckAttachmentM = MRDA, + RoofDeckAge = DQ, + RoofToWallConnection = RWC, + Shutters = shutters, + MasonryReinforcing = MR, + WindDebrisClass = WIDD, + UnitType=unit_tag + )) + + bldg_config = f"M.LRM.2." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{int(MR)}." \ + f"{WIDD}." \ + f"{BIM['RoofSystem']}." \ + f"{RDA}." \ + f"{RWC}." \ + f"{DQ}." \ + f"{MRDA}." \ + f"{unit_tag}." \ + f"{joist_spacing}." 
\ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config \ No newline at end of file diff --git a/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py index 83ded54f1..3d27cbe09 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,12 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import datetime - -def MMUH_config(bim: dict) -> str: # noqa: C901 +def MMUH_config(BIM): """ - Rules to identify a HAZUS MMUH configuration based on BIM data. + Rules to identify a HAZUS MMUH configuration based on BIM data Parameters ---------- @@ -58,25 +58,25 @@ def MMUH_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
- swr: int | str = 'null' # Default - if bim['RoofShape'] == 'flt': - swr = 'null' - elif bim['RoofShape'] in {'hip', 'gab'}: - swr = int(random.random() < 0.6) + SWR = "null" # Default + if BIM['RoofShape'] == 'flt': + SWR = 'null' + elif BIM['RoofShape'] in ['hip', 'gab']: + SWR = int(random.random() < 0.6) # Roof cover & Roof quality # Roof cover and quality do not apply to gable and hip roofs - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'null' roof_quality = 'null' @@ -93,29 +93,26 @@ def MMUH_config(bim: dict) -> str: # noqa: C901 # We assume that all flat roofs built before 1975 are BURs and all roofs # built after 1975 are SPMs. # Nothing in NJ Building Code or in the Hazus manual specifies what - # constitutes “good” and “poor” roof conditions, so ruleset is dependent + # constitutes “good” and “poor” roof conditions, so ruleset is dependant # on the age of the roof and average lifespan of BUR and SPM roofs. # We assume that the average lifespan of a BUR roof is 30 years and the # average lifespan of a SPM is 35 years. Therefore, BURs installed before # 1990 are in poor condition, and SPMs installed before 1985 are in poor # condition. 
- elif year >= 1975: - roof_cover = 'spm' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 35 - ): - roof_quality = 'god' - else: - roof_quality = 'por' else: - # year < 1975 - roof_cover = 'bur' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 30 - ): - roof_quality = 'god' + if year >= 1975: + roof_cover = 'spm' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): + roof_quality = 'god' + else: + roof_quality = 'por' else: - roof_quality = 'por' + # year < 1975 + roof_cover = 'bur' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): + roof_quality = 'god' + else: + roof_quality = 'por' # Roof Deck Attachment (RDA) # IRC 2009-2015: @@ -130,35 +127,35 @@ def MMUH_config(bim: dict) -> str: # noqa: C901 # roughness length in the ruleset herein. # The base rule was then extended to the exposures closest to suburban and # light suburban, even though these are not considered by the code. - if bim['TerrainRoughness'] >= 35: # suburban or light trees - if bim['V_ult'] > 130.0: - rda = '8s' # 8d @ 6"/6" 'D' + if BIM['TerrainRoughness'] >= 35: # suburban or light trees + if BIM['V_ult'] > 130.0: + RDA = '8s' # 8d @ 6"/6" 'D' else: - rda = '8d' # 8d @ 6"/12" 'B' - elif bim['V_ult'] > 110.0: - rda = '8s' # 8d @ 6"/6" 'D' - else: - rda = '8d' # 8d @ 6"/12" 'B' + RDA = '8d' # 8d @ 6"/12" 'B' + else: # light suburban or open + if BIM['V_ult'] > 110.0: + RDA = '8s' # 8d @ 6"/6" 'D' + else: + RDA = '8d' # 8d @ 6"/12" 'B' # Roof-Wall Connection (RWC) - if bim['V_ult'] > 110.0: - rwc = 'strap' # Strap + if BIM['V_ult'] > 110.0: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: - # R301.2.1.2 in NJ IRC 2015 says protection of openings required - # for buildings located in WindBorneDebris regions, mentions - # impact-rated protection for glazing, impact-resistance for - # garage door glazed openings, and finally states that wood - # 
structural panels with a thickness > 7/16" and a span <8' can be - # used, as long as they are precut, attached to the framing - # surrounding the opening, and the attachments are resistant to - # corrosion and are able to resist component and cladding loads; + # R301.2.1.2 in NJ IRC 2015 says protection of openings required for + # buildings located in WindBorneDebris regions, mentions impact-rated protection for + # glazing, impact-resistance for garage door glazed openings, and finally + # states that wood structural panels with a thickness > 7/16" and a + # span <8' can be used, as long as they are precut, attached to the framing + # surrounding the opening, and the attachments are resistant to corrosion + # and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -168,46 +165,45 @@ def MMUH_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Masonry Reinforcing (MR) # R606.6.4.1.2 Metal Reinforcement states that walls other than interior # non-load-bearing walls shall be anchored at vertical intervals of not - # more than 8 inches with joint reinforcement of not less than 9-gage. + # more than 8 inches with joint reinforcement of not less than 9 gage. # Therefore this ruleset assumes that all exterior or load-bearing masonry # walls will have reinforcement. 
Since our considerations deal with wind # speed, I made the assumption that only exterior walls are being taken # into consideration. - mr = True + MR = True - stories = min(bim['NumberOfStories'], 3) + stories = min(BIM['NumberOfStories'], 3) # extend the BIM dictionary - bim.update( - { - 'SecondaryWaterResistance': swr, - 'RoofCover': roof_cover, - 'RoofQuality': roof_quality, - 'RoofDeckAttachmentW': rda, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'MasonryReinforcing': mr, - } - ) - - return ( - f"M.MUH." - f"{int(stories)}." - f"{bim['RoofShape']}." - f"{int(swr)}." - f"{roof_cover}." - f"{roof_quality}." - f"{rda}." - f"{rwc}." - f"{int(shutters)}." - f"{int(mr)}." - f"{int(bim['TerrainRoughness'])}" - ) + BIM.update(dict( + SecondaryWaterResistance = SWR, + RoofCover = roof_cover, + RoofQuality = roof_quality, + RoofDeckAttachmentW = RDA, + RoofToWallConnection = RWC, + Shutters = shutters, + MasonryReinforcing = MR, + )) + + bldg_config = f"M.MUH." \ + f"{int(stories)}." \ + f"{BIM['RoofShape']}." \ + f"{int(SWR)}." \ + f"{roof_cover}." \ + f"{roof_quality}." \ + f"{RDA}." \ + f"{RWC}." \ + f"{int(shutters)}." \ + f"{int(MR)}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py index bb538715b..a9878d9de 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,12 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import datetime - -def MSF_config(bim: dict) -> str: # noqa: C901 +def MSF_config(BIM): """ - Rules to identify a HAZUS MSF configuration based on BIM data. 
+ Rules to identify a HAZUS MSF configuration based on BIM data Parameters ---------- @@ -58,37 +58,36 @@ def MSF_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof-Wall Connection (RWC) - if bim['HazardProneRegion']: - rwc = 'strap' # Strap + if BIM['HazardProneRegion']: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # Roof Frame Type - rft = bim['RoofSystem'] + RFT = BIM['RoofSystem'] # Story Flag - stories = min(bim['NumberOfStories'], 2) + stories = min(BIM['NumberOfStories'], 2) # Shutters # IRC 2000-2015: - # R301.2.1.2 in NJ IRC 2015 says protection of openings required - # for buildings located in WindBorneDebris regions, mentions - # impact-rated protection for glazing, impact-resistance for - # garage door glazed openings, and finally states that wood - # structural panels with a thickness > 7/16" and a span <8' can be - # used, as long as they are precut, attached to the framing - # surrounding the opening, and the attachments are resistant to - # corrosion and are able to resist component and cladding loads; + # R301.2.1.2 in NJ IRC 2015 says protection of openings required for + # buildings located in WindBorneDebris regions, mentions impact-rated protection for + # glazing, impact-resistance for garage door glazed openings, and finally + # states that wood structural panels with a thickness > 7/16" and a + # span <8' can be used, as long as they are precut, attached to the framing + # surrounding the opening, and the attachments are resistant to corrosion + # and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. 
if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -98,12 +97,15 @@ def MSF_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.45 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False + + + if BIM['RoofSystem'] == 'trs': - if bim['RoofSystem'] == 'trs': # Roof Deck Attachment (RDA) # IRC codes: # NJ code requires 8d nails (with spacing 6”/12”) for sheathing thicknesses @@ -112,22 +114,22 @@ def MSF_config(bim: dict) -> str: # noqa: C901 # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6” # spacing (enhanced roof spacing) for ultimate wind speeds greater than # a speed_lim. speed_lim depends on the year of construction - rda = '6d' # Default (aka A) in Reorganized Rulesets - WIND + RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND if year >= 2016: # IRC 2015 - speed_lim = 130.0 # mph + speed_lim = 130.0 # mph else: # IRC 2000 - 2009 - speed_lim = 100.0 # mph - if bim['V_ult'] > speed_lim: - rda = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) + speed_lim = 100.0 # mph + if BIM['V_ult'] > speed_lim: + RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) else: - rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
- swr: int | float | str = random.random() < 0.6 + SWR = random.random() < 0.6 # Garage # As per IRC 2015: @@ -144,68 +146,69 @@ def MSF_config(bim: dict) -> str: # noqa: C901 # (and therefore do not have any strength requirements) that are older than # 30 years are considered to be weak, whereas those from the last 30 years # are considered to be standard. - if bim['Garage'] == -1: + if BIM['Garage'] == -1: # no garage data, using the default "none" garage = 'no' - elif year > (datetime.datetime.now(tz=datetime.timezone.utc).year - 30): - if bim['Garage'] < 1: - garage = 'no' # None - elif shutters: - garage = 'sup' # SFBC 1994 - else: - garage = 'std' # Standard - elif bim['Garage'] < 1: - garage = 'no' # None - elif shutters: - garage = 'sup' else: - garage = 'wkd' # Weak + if year > (datetime.datetime.now().year - 30): + if BIM['Garage'] < 1: + garage = 'no' # None + else: + if shutters: + garage = 'sup' # SFBC 1994 + else: + garage = 'std' # Standard + else: + # year <= current year - 30 + if BIM['Garage'] < 1: + garage = 'no' # None + else: + if shutters: + garage = 'sup' + else: + garage = 'wkd' # Weak # Masonry Reinforcing (MR) # R606.6.4.1.2 Metal Reinforcement states that walls other than interior # non-load-bearing walls shall be anchored at vertical intervals of not - # more than 8 inches with joint reinforcement of not less than 9-gage. + # more than 8 inches with joint reinforcement of not less than 9 gage. # Therefore this ruleset assumes that all exterior or load-bearing masonry # walls will have reinforcement. Since our considerations deal with wind # speed, I made the assumption that only exterior walls are being taken # into consideration. 
- mr = True + MR = True - stories = min(bim['NumberOfStories'], 2) + stories = min(BIM['NumberOfStories'], 2) # extend the BIM dictionary - bim.update( - { - 'SecondaryWaterResistance': swr, - 'RoofDeckAttachmentW': rda, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'AugmentGarage': garage, - 'MasonryReinforcing': mr, - } - ) + BIM.update(dict( + SecondaryWaterResistance = SWR, + RoofDeckAttachmentW = RDA, + RoofToWallConnection = RWC, + Shutters = shutters, + AugmentGarage = garage, + MasonryReinforcing = MR, + )) - bldg_config = ( - f"M.SF." - f"{int(stories)}." - f"{bim['RoofShape']}." - f"{rwc}." - f"{rft}." - f"{rda}." - f"{int(shutters)}." - f"{int(swr)}." - f"{garage}." - f"{int(mr)}." - f"null." - f"{int(bim['TerrainRoughness'])}" - ) + bldg_config = f"M.SF." \ + f"{int(stories)}." \ + f"{BIM['RoofShape']}." \ + f"{RWC}." \ + f"{RFT}." \ + f"{RDA}." \ + f"{int(shutters)}." \ + f"{int(SWR)}." \ + f"{garage}." \ + f"{int(MR)}." \ + f"null." \ + f"{int(BIM['TerrainRoughness'])}" else: # Roof system = OSJW # r # A 2015 study found that there were 750,000 metal roof installed in 2015, # out of 5 million new roofs in the US annually. If these numbers stay - # relatively stable, that implies that roughly 15% of roofs are smlt. + # relatively stable, that implies that roughtly 15% of roofs are smlt. # ref. link: https://www.bdcnetwork.com/blog/metal-roofs-are-soaring- # popularity-residential-marmet roof_cover_options = ['smtl', 'cshl'] @@ -216,54 +219,44 @@ def MSF_config(bim: dict) -> str: # noqa: C901 # high wind attachments are required for DSWII > 142 mph # NJ IBC 1507.4.5 (for smtl) # high wind attachment are required for DSWII > 142 mph - if bim['V_ult'] > 142.0: - rda = 'sup' # superior + if BIM['V_ult'] > 142.0: + RDA = 'sup' # superior else: - rda = 'std' # standard + RDA = 'std' # standard # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. 
+ SWR = 'null' # Default + if BIM['RoofShape'] == 'flt': + SWR = int(True) + elif ((BIM['RoofShape'] in ['hip', 'gab']) and + (roof_cover=='cshl') and (RDA=='sup')): + SWR = int(random.random() < 0.6) - # Default - swr = 'null' # type: ignore[no-redef] - - if bim['RoofShape'] == 'flt': - swr = int(True) # type: ignore[assignment] - elif ( - (bim['RoofShape'] in {'hip', 'gab'}) - and (roof_cover == 'cshl') - and (rda == 'sup') - ): - swr = int(random.random() < 0.6) - - stories = min(bim['NumberOfStories'], 2) + stories = min(BIM['NumberOfStories'], 2) # extend the BIM dictionary - bim.update( - { - 'SecondaryWaterResistance': swr, - 'RoofDeckAttachmentW': rda, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'AugmentGarage': garage, # type: ignore[used-before-def] - 'MasonryReinforcing': mr, # type: ignore[used-before-def] - } - ) + BIM.update(dict( + SecondaryWaterResistance = SWR, + RoofDeckAttachmentW = RDA, + RoofToWallConnection = RWC, + Shutters = shutters, + AugmentGarage = garage, + MasonryReinforcing = MR, + )) - bldg_config = ( - f"M.SF." - f"{int(stories)}." - f"{bim['RoofShape']}." - f"{rwc}." - f"{rft}." - f"{rda}." - f"{int(shutters)}." - f"{swr}." - f"null." - f"null." - f"{roof_cover}." - f"{int(bim['TerrainRoughness'])}" - ) + bldg_config = f"M.SF." \ + f"{int(stories)}." \ + f"{BIM['RoofShape']}." \ + f"{RWC}." \ + f"{RFT}." \ + f"{RDA}." \ + f"{int(shutters)}." \ + f"{SWR}." \ + f"null." \ + f"null." \ + f"{roof_cover}." 
\ + f"{int(BIM['TerrainRoughness'])}" return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py index af608140c..baf5108d8 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,13 @@ # Meredith Lockhead # Tracy Kijewski-Correa -from __future__ import annotations - +import random import numpy as np import pandas as pd +import datetime +import math - -def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: C901, PLR0912 +def parse_BIM(BIM_in, location, hazards): """ Parses the information provided in the AIM model. @@ -82,66 +83,77 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: ------- BIM_ap: dictionary Parsed building characteristics. 
- """ + # check location - if location not in {'LA', 'NJ'}: - print(f'WARNING: The provided location is not recognized: {location}') # noqa: T201 + if location not in ['LA', 'NJ']: + print(f'WARNING: The provided location is not recognized: {location}') # check hazard for hazard in hazards: - if hazard not in {'wind', 'inundation'}: - print(f'WARNING: The provided hazard is not recognized: {hazard}') # noqa: T201 + if hazard not in ['wind', 'inundation']: + print(f'WARNING: The provided hazard is not recognized: {hazard}') # initialize the BIM dict - bim_ap = bim_in.copy() + BIM_ap = BIM_in.copy() if 'wind' in hazards: + # maps roof type to the internal representation - ap_roof_type = { - 'hip': 'hip', + ap_RoofType = { + 'hip' : 'hip', 'hipped': 'hip', - 'Hip': 'hip', + 'Hip' : 'hip', 'gabled': 'gab', - 'gable': 'gab', - 'Gable': 'gab', - 'flat': 'flt', - 'Flat': 'flt', + 'gable' : 'gab', + 'Gable' : 'gab', + 'flat' : 'flt', + 'Flat' : 'flt' } # maps roof system to the internal representation - ap_roof_syste = {'Wood': 'trs', 'OWSJ': 'ows', 'N/A': 'trs'} - roof_system = bim_in.get('RoofSystem', 'Wood') + ap_RoofSyste = { + 'Wood': 'trs', + 'OWSJ': 'ows', + 'N/A': 'trs' + } + roof_system = BIM_in.get('RoofSystem','Wood') if pd.isna(roof_system): roof_system = 'Wood' - # flake8 - unused variable: `ap_NoUnits`. 
- # # maps number of units to the internal representation - # ap_NoUnits = { - # 'Single': 'sgl', - # 'Multiple': 'mlt', - # 'Multi': 'mlt', - # 'nav': 'nav', - # } - - # maps for design level (Marginal Engineered is mapped to - # Engineered as default) - ap_design_level = {'E': 'E', 'NE': 'NE', 'PE': 'PE', 'ME': 'E'} - design_level = bim_in.get('DesignLevel', 'E') + # maps number of units to the internal representation + ap_NoUnits = { + 'Single': 'sgl', + 'Multiple': 'mlt', + 'Multi': 'mlt', + 'nav': 'nav' + } + + # maps for design level (Marginal Engineered is mapped to Engineered as default) + ap_DesignLevel = { + 'E': 'E', + 'NE': 'NE', + 'PE': 'PE', + 'ME': 'E' + } + design_level = BIM_in.get('DesignLevel','E') if pd.isna(design_level): design_level = 'E' # Average January Temp. - ap_ajt = {'Above': 'above', 'Below': 'below'} + ap_ajt = { + 'Above': 'above', + 'Below': 'below' + } # Year built alname_yearbuilt = ['YearBuiltNJDEP', 'yearBuilt', 'YearBuiltMODIV'] yearbuilt = None try: - yearbuilt = bim_in['YearBuilt'] - except KeyError: + yearbuilt = BIM_in['YearBuilt'] + except: for i in alname_yearbuilt: - if i in bim_in: - yearbuilt = bim_in[i] + if i in BIM_in.keys(): + yearbuilt = BIM_in[i] break # if none of the above works, set a default @@ -149,19 +161,14 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: yearbuilt = 1985 # Number of Stories - alname_nstories = [ - 'stories', - 'NumberofStories0', - 'NumberofStories', - 'NumberofStories1', - ] + alname_nstories = ['stories', 'NumberofStories0', 'NumberofStories', 'NumberofStories1'] nstories = None try: - nstories = bim_in['NumberOfStories'] - except KeyError as e: + nstories = BIM_in['NumberOfStories'] + except Exception as e: for i in alname_nstories: - if i in bim_in: - nstories = bim_in[i] + if i in BIM_in.keys(): + nstories = BIM_in[i] break # if none of the above works, we need to raise an exception @@ -172,11 +179,11 @@ def parse_BIM(bim_in: dict, location: str, 
hazards: list[str]) -> dict: # noqa: alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0'] area = None try: - area = bim_in['PlanArea'] - except KeyError as e: + area = BIM_in['PlanArea'] + except Exception as e: for i in alname_area: - if i in bim_in: - area = bim_in[i] + if i in BIM_in.keys(): + area = BIM_in[i] break # if none of the above works, we need to raise an exception @@ -186,21 +193,22 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: # Design Wind Speed alname_dws = ['DSWII', 'DWSII', 'DesignWindSpeed'] - dws = bim_in.get('DesignWindSpeed') + dws = BIM_in.get('DesignWindSpeed', None) if dws is None: for alname in alname_dws: - if alname in bim_in: - dws = bim_in[alname] + if alname in BIM_in.keys(): + dws = BIM_in[alname] break + alname_occupancy = ['occupancy'] oc = None try: - oc = bim_in['OccupancyClass'] - except KeyError as e: + oc = BIM_in['OccupancyClass'] + except Exception as e: for i in alname_occupancy: - if i in bim_in: - oc = bim_in[i] + if i in BIM_in.keys(): + oc = BIM_in[i] break # if none of the above works, we need to raise an exception @@ -212,7 +220,7 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: oc = 'RES3A' # maps for flood zone - ap_flood_zone = { + ap_FloodZone = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 
6101: 'VE', @@ -230,17 +238,17 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: 6113: 'OW', 6114: 'D', 6115: 'NA', - 6119: 'NA', + 6119: 'NA' } - if isinstance(bim_in['FloodZone'], int): + if type(BIM_in['FloodZone']) == int: # NJDEP code for flood zone (conversion to the FEMA designations) - floodzone_fema = ap_flood_zone[bim_in['FloodZone']] + floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] else: # standard input should follow the FEMA flood zone designations - floodzone_fema = bim_in['FloodZone'] + floodzone_fema = BIM_in['FloodZone'] # maps for BuildingType - ap_building_type_nj = { + ap_BuildingType_NJ = { # Coastal areas with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 3001: 'Wood', @@ -251,62 +259,55 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: } if location == 'NJ': # NJDEP code for flood zone needs to be converted - buildingtype = ap_building_type_nj[bim_in['BuildingType']] + buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']] elif location == 'LA': # standard input should provide the building type as a string - buildingtype = bim_in['BuildingType'] + buildingtype = BIM_in['BuildingType'] # first, pull in the provided data - bim_ap.update( - { - 'OccupancyClass': str(oc), - 'BuildingType': buildingtype, - 'YearBuilt': int(yearbuilt), - # double check with Tracy for format - (NumberStories0 - # is 4-digit code) - # (NumberStories1 is image-processed story number) - 'NumberOfStories': int(nstories), - 'PlanArea': float(area), - 'FloodZone': floodzone_fema, - 'V_ult': float(dws), - 'AvgJanTemp': ap_ajt[bim_in.get('AvgJanTemp', 'Below')], - 'RoofShape': ap_roof_type[bim_in['RoofShape']], - 'RoofSlope': float(bim_in.get('RoofSlope', 0.25)), # default 0.25 - 'SheathingThickness': float( - bim_in.get('SheathingThick', 1.0) - ), # default 1.0 - 'RoofSystem': str( - ap_roof_syste[roof_system] - ), # only valid for masonry structures - 
'Garage': float(bim_in.get('Garage', -1.0)), - 'LULC': bim_in.get('LULC', -1), - 'z0': float( - bim_in.get('z0', -1) - ), # if the z0 is already in the input file - 'Terrain': bim_in.get('Terrain', -1), - 'MeanRoofHt': float(bim_in.get('MeanRoofHt', 15.0)), # default 15 - 'DesignLevel': str( - ap_design_level[design_level] - ), # default engineered - 'WindowArea': float(bim_in.get('WindowArea', 0.20)), - 'WoodZone': str(bim_in.get('WindZone', 'I')), - } - ) + BIM_ap.update(dict( + OccupancyClass=str(oc), + BuildingType=buildingtype, + YearBuilt=int(yearbuilt), + # double check with Tracy for format - (NumberStories0 is 4-digit code) + # (NumberStories1 is image-processed story number) + NumberOfStories=int(nstories), + PlanArea=float(area), + FloodZone=floodzone_fema, + V_ult=float(dws), + AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp','Below')], + RoofShape=ap_RoofType[BIM_in['RoofShape']], + RoofSlope=float(BIM_in.get('RoofSlope',0.25)), # default 0.25 + SheathingThickness=float(BIM_in.get('SheathingThick',1.0)), # default 1.0 + RoofSystem=str(ap_RoofSyste[roof_system]), # only valid for masonry structures + Garage=float(BIM_in.get('Garage',-1.0)), + LULC=BIM_in.get('LULC',-1), + z0 = float(BIM_in.get('z0',-1)), # if the z0 is already in the input file + Terrain = BIM_in.get('Terrain',-1), + MeanRoofHt=float(BIM_in.get('MeanRoofHt',15.0)), # default 15 + DesignLevel=str(ap_DesignLevel[design_level]), # default engineered + WindowArea=float(BIM_in.get('WindowArea',0.20)), + WoodZone=str(BIM_in.get('WindZone', 'I')) + )) if 'inundation' in hazards: + # maps for split level - ap_split_level = {'NO': 0, 'YES': 1} + ap_SplitLevel = { + 'NO': 0, + 'YES': 1 + } - foundation = bim_in.get('FoundationType', 3501) + foundation = BIM_in.get('FoundationType',3501) if pd.isna(foundation): foundation = 3501 - nunits = bim_in.get('NoUnits', 1) + nunits = BIM_in.get('NoUnits',1) if pd.isna(nunits): nunits = 1 # maps for flood zone - ap_flood_zone = { + ap_FloodZone = { # Coastal areas 
with a 1% or greater chance of flooding and an # additional hazard associated with storm waves. 6101: 'VE', @@ -324,45 +325,40 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: 6113: 'OW', 6114: 'D', 6115: 'NA', - 6119: 'NA', + 6119: 'NA' } - if isinstance(bim_in['FloodZone'], int): + if type(BIM_in['FloodZone']) == int: # NJDEP code for flood zone (conversion to the FEMA designations) - floodzone_fema = ap_flood_zone[bim_in['FloodZone']] + floodzone_fema = ap_FloodZone[BIM_in['FloodZone']] else: # standard input should follow the FEMA flood zone designations - floodzone_fema = bim_in['FloodZone'] + floodzone_fema = BIM_in['FloodZone'] # add the parsed data to the BIM dict - bim_ap.update( - { - 'DesignLevel': str( - ap_design_level[design_level] - ), # default engineered - 'NumberOfUnits': int(nunits), - 'FirstFloorElevation': float(bim_in.get('FirstFloorHt1', 10.0)), - 'SplitLevel': bool( - ap_split_level[bim_in.get('SplitLevel', 'NO')] - ), # dfault: no - 'FoundationType': int(foundation), # default: pile - 'City': bim_in.get('City', 'NA'), - 'FloodZone': str(floodzone_fema), - } - ) + BIM_ap.update(dict( + DesignLevel=str(ap_DesignLevel[design_level]), # default engineered + NumberOfUnits=int(nunits), + FirstFloorElevation=float(BIM_in.get('FirstFloorHt1',10.0)), + SplitLevel=bool(ap_SplitLevel[BIM_in.get('SplitLevel','NO')]), # dfault: no + FoundationType=int(foundation), # default: pile + City=BIM_in.get('City','NA'), + FloodZone =str(floodzone_fema) + )) # add inferred, generic meta-variables if 'wind' in hazards: + # Hurricane-Prone Region (HRP) # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is # greater than a pre-defined limit. 
- if bim_ap['YearBuilt'] >= 2016: + if BIM_ap['YearBuilt'] >= 2016: # The limit is 115 mph in IRC 2015 - hpr = bim_ap['V_ult'] > 115.0 + HPR = BIM_ap['V_ult'] > 115.0 else: # The limit is 90 mph in IRC 2009 and earlier versions - hpr = bim_ap['V_ult'] > 90.0 + HPR = BIM_ap['V_ult'] > 90.0 # Wind Borne Debris # Areas within hurricane-prone regions are affected by debris if one of @@ -372,30 +368,25 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: # (2) In areas where the ultimate design wind speed is greater than # general_lim # The flood_lim and general_lim limits depend on the year of construction - if bim_ap['YearBuilt'] >= 2016: + if BIM_ap['YearBuilt'] >= 2016: # In IRC 2015: - flood_lim = 130.0 # mph - general_lim = 140.0 # mph + flood_lim = 130.0 # mph + general_lim = 140.0 # mph else: # In IRC 2009 and earlier versions - flood_lim = 110.0 # mph - general_lim = 120.0 # mph + flood_lim = 110.0 # mph + general_lim = 120.0 # mph # Areas within hurricane-prone regions located in accordance with # one of the following: # (1) Within 1 mile (1.61 km) of the coastal mean high water line # where the ultimate design wind speed is 130 mph (58m/s) or greater. # (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s) # or greater. (Definitions: Chapter 2, 2015 NJ Residential Code) - if not hpr: - wbd = False + if not HPR: + WBD = False else: - wbd = ( - ( - bim_ap['FloodZone'].startswith('A') - or bim_ap['FloodZone'].startswith('V') - ) - and bim_ap['V_ult'] >= flood_lim - ) or (bim_ap['V_ult'] >= general_lim) + WBD = (((BIM_ap['FloodZone'].startswith('A') or BIM_ap['FloodZone'].startswith('V')) and + BIM_ap['V_ult'] >= flood_lim) or (BIM_ap['V_ult'] >= general_lim)) # Terrain # open (0.03) = 3 @@ -406,87 +397,69 @@ def parse_BIM(bim_in: dict, location: str, hazards: list[str]) -> dict: # noqa: # Mapped to Land Use Categories in NJ (see https://www.state.nj.us/dep/gis/ # digidownload/metadata/lulc02/anderson2002.html) by T. 
Wu group # (see internal report on roughness calculations, Table 4). - # These are mapped to Hazus definitions as follows: - # Open Water (5400s) with zo=0.01 and barren land (7600) with - # zo=0.04 assume Open Open Space Developed, Low Intensity - # Developed, Medium Intensity Developed (1110-1140) assumed - # zo=0.35-0.4 assume Suburban High Intensity Developed (1600) - # with zo=0.6 assume Lt. Tree Forests of all classes - # (4100-4300) assumed zo=0.6 assume Lt. Tree Shrub (4400) with - # zo=0.06 assume Open Grasslands, pastures and agricultural - # areas (2000 series) with zo=0.1-0.15 assume Lt. Suburban - # Woody Wetlands (6250) with zo=0.3 assume suburban Emergent - # Herbaceous Wetlands (6240) with zo=0.03 assume Open + # These are mapped to Hazus defintions as follows: + # Open Water (5400s) with zo=0.01 and barren land (7600) with zo=0.04 assume Open + # Open Space Developed, Low Intensity Developed, Medium Intensity Developed + # (1110-1140) assumed zo=0.35-0.4 assume Suburban + # High Intensity Developed (1600) with zo=0.6 assume Lt. Tree + # Forests of all classes (4100-4300) assumed zo=0.6 assume Lt. Tree + # Shrub (4400) with zo=0.06 assume Open + # Grasslands, pastures and agricultural areas (2000 series) with + # zo=0.1-0.15 assume Lt. 
Suburban + # Woody Wetlands (6250) with zo=0.3 assume suburban + # Emergent Herbaceous Wetlands (6240) with zo=0.03 assume Open # Note: HAZUS category of trees (1.00) does not apply to any LU/LC in NJ - terrain = 15 # Default in Reorganized Rulesets - WIND - lulc = bim_ap['LULC'] - terrain = bim_ap['Terrain'] - if bim_ap['z0'] > 0: - terrain = int(100 * bim_ap['z0']) - elif lulc > 0: - if bim_ap['FloodZone'].startswith('V') or bim_ap['FloodZone'] in { - 'A', - 'AE', - 'A1-30', - 'AR', - 'A99', - }: + terrain = 15 # Default in Reorganized Rulesets - WIND + LULC = BIM_ap['LULC'] + TER = BIM_ap['Terrain'] + if (BIM_ap['z0'] > 0): + terrain = int(100 * BIM_ap['z0']) + elif (LULC > 0): + if (BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']): terrain = 3 - elif ((lulc >= 5000) and (lulc <= 5999)) or ( - (lulc in {4400, 6240}) or (lulc == 7600) - ): - terrain = 3 # Open - elif (lulc >= 2000) and (lulc <= 2999): - terrain = 15 # Light suburban - elif ((lulc >= 1110) and (lulc <= 1140)) or ( - (lulc >= 6250) and (lulc <= 6252) - ): - terrain = 35 # Suburban - elif ((lulc >= 4100) and (lulc <= 4300)) or (lulc == 1600): - terrain = 70 # light trees - elif terrain > 0: - if bim_ap['FloodZone'].startswith('V') or bim_ap['FloodZone'] in { - 'A', - 'AE', - 'A1-30', - 'AR', - 'A99', - }: + elif ((LULC >= 5000) and (LULC <= 5999)): + terrain = 3 # Open + elif ((LULC == 4400) or (LULC == 6240)) or (LULC == 7600): + terrain = 3 # Open + elif ((LULC >= 2000) and (LULC <= 2999)): + terrain = 15 # Light suburban + elif ((LULC >= 1110) and (LULC <= 1140)) or ((LULC >= 6250) and (LULC <= 6252)): + terrain = 35 # Suburban + elif ((LULC >= 4100) and (LULC <= 4300)) or (LULC == 1600): + terrain = 70 # light trees + elif (TER > 0): + if (BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']): terrain = 3 - elif ((terrain >= 50) and (terrain <= 59)) or ( - (terrain in {44, 62}) or (terrain == 76) - ): - 
terrain = 3 # Open - elif (terrain >= 20) and (terrain <= 29): - terrain = 15 # Light suburban - elif terrain in {11, 61}: - terrain = 35 # Suburban - elif ((terrain >= 41) and (terrain <= 43)) or (terrain in {16, 17}): - terrain = 70 # light trees - - bim_ap.update( - { - # Nominal Design Wind Speed - # Former term was “Basic Wind Speed”; it is now the “Nominal Design - # Wind Speed (V_asd). Unit: mph." - 'V_asd': np.sqrt(0.6 * bim_ap['V_ult']), - 'HazardProneRegion': hpr, - 'WindBorneDebris': wbd, - 'TerrainRoughness': terrain, - } - ) + elif ((TER >= 50) and (TER <= 59)): + terrain = 3 # Open + elif ((TER == 44) or (TER == 62)) or (TER == 76): + terrain = 3 # Open + elif ((TER >= 20) and (TER <= 29)): + terrain = 15 # Light suburban + elif (TER == 11) or (TER == 61): + terrain = 35 # Suburban + elif ((TER >= 41) and (TER <= 43)) or (TER in [16, 17]): + terrain = 70 # light trees + + BIM_ap.update(dict( + # Nominal Design Wind Speed + # Former term was “Basic Wind Speed”; it is now the “Nominal Design + # Wind Speed (V_asd). Unit: mph." + V_asd = np.sqrt(0.6 * BIM_ap['V_ult']), + + HazardProneRegion=HPR, + WindBorneDebris=WBD, + TerrainRoughness=terrain, + )) if 'inundation' in hazards: - bim_ap.update( - { - # Flood Risk - # Properties in the High Water Zone (within 1 mile of - # the coast) are at risk of flooding and other - # wind-borne debris action. - # TODO: need high water zone for this and move it to # noqa: TD002 - # inputs! - 'FloodRisk': True, - } - ) - - return bim_ap + + BIM_ap.update(dict( + # Flood Risk + # Properties in the High Water Zone (within 1 mile of the coast) are at + # risk of flooding and other wind-borne debris action. + FloodRisk=True, # TODO: need high water zone for this and move it to inputs! 
+ )) + + return BIM_ap + diff --git a/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py index b95effa50..d07f63fdf 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,10 +45,9 @@ import random - -def SECB_config(bim: dict) -> str: # noqa: C901 +def SECB_config(BIM): """ - Rules to identify a HAZUS SECB configuration based on BIM data. + Rules to identify a HAZUS SECB configuration based on BIM data Parameters ---------- @@ -57,25 +57,26 @@ def SECB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this building + A string that identifies a specific configration within this buidling class. - """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur - elif year >= 1975: - roof_cover = 'spm' else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -85,65 +86,66 @@ def SECB_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. 
In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'S.ECB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'S.ECB.M' else: bldg_tag = 'S.ECB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'WindowAreaRatio': wwr, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + WindowAreaRatio = WWR, + RoofDeckAttachmentM = MRDA, + Shutters = shutters, + WindDebrisClass=WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{MRDA}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{mrda}." - f"{wwr}." - f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py index 942fd7a7b..d6711b347 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -44,10 +45,9 @@ import random - -def SERB_config(bim: dict) -> str: # noqa: C901 +def SERB_config(BIM): """ - Rules to identify a HAZUS SERB configuration based on BIM data. 
+ Rules to identify a HAZUS SERB configuration based on BIM data Parameters ---------- @@ -57,25 +57,26 @@ def SERB_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this building + A string that identifies a specific configration within this buidling class. - """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof cover - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'bur' # Warning: HAZUS does not have N/A option for CECB, so here we use bur - elif year >= 1975: - roof_cover = 'spm' else: - # year < 1975 - roof_cover = 'bur' + if year >= 1975: + roof_cover = 'spm' + else: + # year < 1975 + roof_cover = 'bur' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -85,65 +86,65 @@ def SERB_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. 
- elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Wind Debris (widd in HAZSU) # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None - widd = 'C' # residential (default) - if bim['OccupancyClass'] in {'RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', 'RES3D'}: - widd = 'C' # residential - elif bim['OccupancyClass'] == 'AGR1': - widd = 'D' # None + WIDD = 'C' # residential (default) + if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C', + 'RES3D']: + WIDD = 'C' # residential + elif BIM['OccupancyClass'] == 'AGR1': + WIDD = 'D' # None else: - widd = 'A' # Res/Comm + WIDD = 'A' # Res/Comm # Window area ratio - if bim['WindowArea'] < 0.33: - wwr = 'low' - elif bim['WindowArea'] < 0.5: - wwr = 'med' + if BIM['WindowArea'] < 0.33: + WWR = 'low' + elif BIM['WindowArea'] < 0.5: + WWR = 'med' else: - wwr = 'hig' + WWR = 'hig' # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior - if bim['NumberOfStories'] <= 2: + if BIM['NumberOfStories'] <= 2: bldg_tag = 'S.ERB.L' - elif bim['NumberOfStories'] <= 5: + elif BIM['NumberOfStories'] <= 5: bldg_tag = 'S.ERB.M' else: bldg_tag = 'S.ERB.H' # extend the BIM dictionary - bim.update( - { - 'RoofCover': roof_cover, - 'WindowAreaRatio': wwr, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - 'WindDebrisClass': widd, - } - ) + BIM.update(dict( + RoofCover = roof_cover, + WindowAreaRatio = WWR, + RoofDeckAttachmentM = MRDA, + Shutters = shutters, + WindDebrisClass=WIDD + )) + + bldg_config = f"{bldg_tag}." \ + f"{roof_cover}." \ + f"{int(shutters)}." \ + f"{WIDD}." \ + f"{MRDA}." \ + f"{WWR}." \ + f"{int(BIM['TerrainRoughness'])}" - return ( - f"{bldg_tag}." - f"{roof_cover}." - f"{int(shutters)}." - f"{widd}." - f"{mrda}." - f"{wwr}." - f"{int(bim['TerrainRoughness'])}" - ) + return bldg_config diff --git a/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py index 16c653833..42f8a6407 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,14 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import numpy as np +import datetime -def SPMB_config(bim: dict) -> str: +def SPMB_config(BIM): """ - Rules to identify a HAZUS SPMB configuration based on BIM data. 
+ Rules to identify a HAZUS SPMB configuration based on BIM data Parameters ---------- @@ -58,23 +60,21 @@ def SPMB_config(bim: dict) -> str: Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Roof Deck Age (~ Roof Quality) - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 50 - ): + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 50): roof_quality = 'god' else: roof_quality = 'por' # shutters if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -84,43 +84,43 @@ def SPMB_config(bim: dict) -> str: # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Metal RDA # 1507.2.8.1 High Wind Attachment. # Underlayment applied in areas subject to high winds (Vasd greater # than 110 mph as determined in accordance with Section 1609.3.1) shall # be applied with corrosion-resistant fasteners in accordance with - # the manufacturer's instructions. Fasteners are to be applied along + # the manufacturer’s instructions. Fasteners are to be applied along # the overlap not more than 36 inches on center. 
- if bim['V_ult'] > 142: - mrda = 'std' # standard + if BIM['V_ult'] > 142: + MRDA = 'std' # standard else: - mrda = 'sup' # superior + MRDA = 'sup' # superior - if bim['PlanArea'] <= 4000: + if BIM['PlanArea'] <= 4000: bldg_tag = 'S.PMB.S' - elif bim['PlanArea'] <= 50000: + elif BIM['PlanArea'] <= 50000: bldg_tag = 'S.PMB.M' else: bldg_tag = 'S.PMB.L' # extend the BIM dictionary - bim.update( - { - 'RoofQuality': roof_quality, - 'RoofDeckAttachmentM': mrda, - 'Shutters': shutters, - } - ) + BIM.update(dict( + RoofQuality = roof_quality, + RoofDeckAttachmentM = MRDA, + Shutters = shutters + )) + + bldg_config = f"{bldg_tag}." \ + f"{int(shutters)}." \ + f"{roof_quality}." \ + f"{MRDA}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"{bldg_tag}." - f"{int(shutters)}." - f"{roof_quality}." - f"{mrda}." - f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py index 9de71dc26..6d5fe338d 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,12 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import datetime - -def WMUH_config(bim: dict) -> str: # noqa: C901 +def WMUH_config(BIM): """ - Rules to identify a HAZUS WMUH configuration based on BIM data. + Rules to identify a HAZUS WMUH configuration based on BIM data Parameters ---------- @@ -58,35 +58,37 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. 
""" - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) - swr: str | int = 0 # Default + SWR = 0 # Default if year > 2000: - if bim['RoofShape'] == 'flt': - swr = 'null' # because SWR is not a question for flat roofs - elif bim['RoofShape'] in {'gab', 'hip'}: - swr = int(random.random() < 0.6) + if BIM['RoofShape'] == 'flt': + SWR = 'null' # because SWR is not a question for flat roofs + elif BIM['RoofShape'] in ['gab','hip']: + SWR = int(random.random() < 0.6) elif year > 1987: - if bim['RoofShape'] == 'flt': - swr = 'null' # because SWR is not a question for flat roofs - elif (bim['RoofShape'] == 'gab') or (bim['RoofShape'] == 'hip'): - if bim['RoofSlope'] < 0.33: - swr = int(True) + if BIM['RoofShape'] == 'flt': + SWR = 'null' # because SWR is not a question for flat roofs + elif (BIM['RoofShape'] == 'gab') or (BIM['RoofShape'] == 'hip'): + if BIM['RoofSlope'] < 0.33: + SWR = int(True) else: - swr = int(bim['AvgJanTemp'] == 'below') - elif bim['RoofShape'] == 'flt': - swr = 'null' # because SWR is not a question for flat roofs + SWR = int(BIM['AvgJanTemp'] == 'below') else: - swr = int(random.random() < 0.3) + # year <= 1987 + if BIM['RoofShape'] == 'flt': + SWR = 'null' # because SWR is not a question for flat roofs + else: + SWR = int(random.random() < 0.3) # Roof cover & Roof quality # Roof cover and quality do not apply to gable and hip roofs - if bim['RoofShape'] in {'gab', 'hip'}: + if BIM['RoofShape'] in ['gab', 'hip']: roof_cover = 'null' roof_quality = 'null' # NJ Building Code Section 1507 (in particular 1507.10 and 1507.12) address @@ -102,29 +104,26 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # We assume that all flat roofs built before 1975 are BURs and all roofs # built after 1975 are SPMs. 
# Nothing in NJ Building Code or in the Hazus manual specifies what - # constitutes “good” and “poor” roof conditions, so ruleset is dependent + # constitutes “good” and “poor” roof conditions, so ruleset is dependant # on the age of the roof and average lifespan of BUR and SPM roofs. # We assume that the average lifespan of a BUR roof is 30 years and the # average lifespan of a SPM is 35 years. Therefore, BURs installed before # 1990 are in poor condition, and SPMs installed before 1985 are in poor # condition. - elif year >= 1975: - roof_cover = 'spm' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 35 - ): - roof_quality = 'god' - else: - roof_quality = 'por' else: - # year < 1975 - roof_cover = 'bur' - if bim['YearBuilt'] >= ( - datetime.datetime.now(tz=datetime.timezone.utc).year - 30 - ): - roof_quality = 'god' + if year >= 1975: + roof_cover = 'spm' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35): + roof_quality = 'god' + else: + roof_quality = 'por' else: - roof_quality = 'por' + # year < 1975 + roof_cover = 'bur' + if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30): + roof_quality = 'god' + else: + roof_quality = 'por' # Roof Deck Attachment (RDA) # IRC 2009-2015: @@ -140,15 +139,16 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # The base rule was then extended to the exposures closest to suburban and # light suburban, even though these are not considered by the code. 
if year > 2009: - if bim['TerrainRoughness'] >= 35: # suburban or light trees - if bim['V_ult'] > 168.0: - rda = '8s' # 8d @ 6"/6" 'D' + if BIM['TerrainRoughness'] >= 35: # suburban or light trees + if BIM['V_ult'] > 168.0: + RDA = '8s' # 8d @ 6"/6" 'D' else: - rda = '8d' # 8d @ 6"/12" 'B' - elif bim['V_ult'] > 142.0: - rda = '8s' # 8d @ 6"/6" 'D' - else: - rda = '8d' # 8d @ 6"/12" 'B' + RDA = '8d' # 8d @ 6"/12" 'B' + else: # light suburban or open + if BIM['V_ult'] > 142.0: + RDA = '8s' # 8d @ 6"/6" 'D' + else: + RDA = '8d' # 8d @ 6"/12" 'B' # IRC 2000-2006: # Table 2304.9.1, Line 31 of the 2006 # NJ IBC requires 8d nails (with spacing 6”/12”) for sheathing thicknesses @@ -158,47 +158,49 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # change of connector at a certain wind speed. # Thus, all RDAs are assumed to be 8d @ 6”/12”. elif year > 2000: - rda = '8d' # 8d @ 6"/12" 'B' + RDA = '8d' # 8d @ 6"/12" 'B' # BOCA 1996: # The BOCA 1996 Building Code Requires 8d nails (with spacing 6”/12”) for # roof sheathing thickness up to 1". See Table 2305.2, Section 4. # Attachment requirements are given based on sheathing thickness, basic # wind speed, and the mean roof height of the building. elif year > 1996: - if (bim['V_ult'] >= 103) and (bim['MeanRoofHt'] >= 25.0): - rda = '8s' # 8d @ 6"/6" 'D' + if (BIM['V_ult'] >= 103 ) and (BIM['MeanRoofHt'] >= 25.0): + RDA = '8s' # 8d @ 6"/6" 'D' else: - rda = '8d' # 8d @ 6"/12" 'B' + RDA = '8d' # 8d @ 6"/12" 'B' # BOCA 1993: # The BOCA 1993 Building Code Requires 8d nails (with spacing 6”/12”) for # sheathing thicknesses of 19/32 inches or greater, and 6d nails (with # spacing 6”/12”) for sheathing thicknesses of ½ inches or less. # See Table 2305.2, Section 4. 
elif year > 1993: - if bim['SheathingThickness'] <= 0.5: - rda = '6d' # 6d @ 6"/12" 'A' + if BIM['SheathingThickness'] <= 0.5: + RDA = '6d' # 6d @ 6"/12" 'A' else: - rda = '8d' # 8d @ 6"/12" 'B' - elif bim['SheathingThickness'] <= 0.5: - rda = '6d' # 6d @ 6"/12" 'A' + RDA = '8d' # 8d @ 6"/12" 'B' else: - rda = '8d' # 8d @ 6"/12" 'B' + # year <= 1993 + if BIM['SheathingThickness'] <= 0.5: + RDA = '6d' # 6d @ 6"/12" 'A' + else: + RDA = '8d' # 8d @ 6"/12" 'B' # Roof-Wall Connection (RWC) # IRC 2000-2015: # 1507.2.8.1 High Wind Attachment. Underlayment applied in areas subject # to high winds (Vasd greater than 110 mph as determined in accordance # with Section 1609.3.1) shall be applied with corrosion-resistant - # fasteners in accordance with the manufacturer's instructions. Fasteners + # fasteners in accordance with the manufacturer’s instructions. Fasteners # are to be applied along the overlap not more than 36 inches on center. # Underlayment installed where Vasd, in accordance with section 1609.3.1 # equals or exceeds 120 mph shall be attached in a grid pattern of 12 # inches between side laps with a 6-inch spacing at the side laps. if year > 2000: - if bim['V_ult'] > 142.0: - rwc = 'strap' # Strap + if BIM['V_ult'] > 142.0: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # BOCA 1996 and earlier: # There is no mention of straps or enhanced tie-downs of any kind in the # BOCA codes, and there is no description of these adoptions in IBHS @@ -211,7 +213,7 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # codes, it is assumed that New Jersey did not adopt these standards until # the 2000 IBC. else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: @@ -226,7 +228,7 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # are classified as a Group R-3 or R-4 occupancy. # Earlier IRC editions provide similar rules. 
if year >= 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # BOCA 1996 and earlier: # Shutters were not required by code until the 2000 IBC. Before 2000, the # percentage of commercial buildings that have shutters is assumed to be @@ -236,36 +238,36 @@ def WMUH_config(bim: dict) -> str: # noqa: C901 # facilities. In addition to that, 46% of business owners reported boarding # up their businesses before Hurricane Katrina. In addition, compliance # rates based on the Homeowners Survey data hover between 43 and 50 percent. - elif bim['WindBorneDebris']: - shutters = random.random() < 0.46 else: - shutters = False + if BIM['WindBorneDebris']: + shutters = random.random() < 0.46 + else: + shutters = False # Stories # Buildings with more than 3 stories are mapped to the 3-story configuration - stories = min(bim['NumberOfStories'], 3) + stories = min(BIM['NumberOfStories'], 3) # extend the BIM dictionary - bim.update( - { - 'SecondaryWaterResistance': swr, - 'RoofCover': roof_cover, - 'RoofQuality': roof_quality, - 'RoofDeckAttachmentW': rda, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - } - ) + BIM.update(dict( + SecondaryWaterResistance = SWR, + RoofCover = roof_cover, + RoofQuality = roof_quality, + RoofDeckAttachmentW = RDA, + RoofToWallConnection = RWC, + Shutters = shutters + )) + + bldg_config = f"W.MUH." \ + f"{int(stories)}." \ + f"{BIM['RoofShape']}." \ + f"{roof_cover}." \ + f"{roof_quality}." \ + f"{SWR}." \ + f"{RDA}." \ + f"{RWC}." \ + f"{int(shutters)}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config - return ( - f"W.MUH." - f"{int(stories)}." - f"{bim['RoofShape']}." - f"{roof_cover}." - f"{roof_quality}." - f"{swr}." - f"{rda}." - f"{rwc}." - f"{int(shutters)}." 
- f"{int(bim['TerrainRoughness'])}" - ) diff --git a/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py index c619e326b..957ecbf9c 100644 --- a/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py +++ b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California @@ -42,13 +43,12 @@ # Meredith Lockhead # Tracy Kijewski-Correa -import datetime import random +import datetime - -def WSF_config(bim: dict) -> str: # noqa: C901 +def WSF_config(BIM): """ - Rules to identify a HAZUS WSF configuration based on BIM data. + Rules to identify a HAZUS WSF configuration based on BIM data Parameters ---------- @@ -58,21 +58,21 @@ def WSF_config(bim: dict) -> str: # noqa: C901 Returns ------- config: str - A string that identifies a specific configuration within this - building class. - + A string that identifies a specific configration within this buidling + class. """ - year = bim['YearBuilt'] # just for the sake of brevity + + year = BIM['YearBuilt'] # just for the sake of brevity # Secondary Water Resistance (SWR) # Minimum drainage recommendations are in place in NJ (See below). # However, SWR indicates a code-plus practice. - swr = False # Default in Reorganzied Rulesets - WIND + SWR = False # Default in Reorganzied Rulesets - WIND if year > 2000: # For buildings built after 2000, SWR is based on homeowner compliance # data from NC Coastal Homeowner Survey (2017) to capture potential # human behavior (% of sealed roofs in NC dataset). 
- swr = random.random() < 0.6 + SWR = random.random() < 0.6 elif year > 1983: # CABO 1995: # According to 903.2 in the 1995 CABO, for roofs with slopes between @@ -91,13 +91,13 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # Almost all other roof types require underlayment of some sort, but # the ruleset is based on asphalt shingles because it is most # conservative. - if bim['RoofShape'] == 'flt': # note there is actually no 'flt' - swr = True - elif bim['RoofShape'] in {'gab', 'hip'}: - if bim['RoofSlope'] <= 0.17: - swr = True - elif bim['RoofSlope'] < 0.33: - swr = bim['AvgJanTemp'] == 'below' + if BIM['RoofShape'] == 'flt': # note there is actually no 'flt' + SWR = True + elif BIM['RoofShape'] in ['gab','hip']: + if BIM['RoofSlope'] <= 0.17: + SWR = True + elif BIM['RoofSlope'] < 0.33: + SWR = (BIM['AvgJanTemp'] == 'below') # Roof Deck Attachment (RDA) # IRC codes: @@ -107,42 +107,34 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6” # spacing (enhanced roof spacing) for ultimate wind speeds greater than # a speed_lim. 
speed_lim depends on the year of construction - rda = '6d' # Default (aka A) in Reorganized Rulesets - WIND + RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND if year > 2000: if year >= 2016: # IRC 2015 - speed_lim = 130.0 # mph + speed_lim = 130.0 # mph else: # IRC 2000 - 2009 - speed_lim = 100.0 # mph - if bim['V_ult'] > speed_lim: - rda = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) + speed_lim = 100.0 # mph + if BIM['V_ult'] > speed_lim: + RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND) else: - rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) elif year > 1995: - if (bim['SheathingThickness'] >= 0.3125) and ( - bim['SheathingThickness'] <= 0.5 - ): - rda = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (bim['SheathingThickness'] >= 0.59375) and ( - bim['SheathingThickness'] <= 1.125 - ): - rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + if ((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)): + RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) + elif ((BIM['SheathingThickness'] >= 0.59375) and (BIM['SheathingThickness'] <= 1.125)): + RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) elif year > 1986: - if (bim['SheathingThickness'] >= 0.3125) and ( - bim['SheathingThickness'] <= 0.5 - ): - rda = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (bim['SheathingThickness'] >= 0.59375) and ( - bim['SheathingThickness'] <= 1.0 - ): - rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) - elif (bim['SheathingThickness'] >= 0.3125) and ( - bim['SheathingThickness'] <= 0.5 - ): - rda = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) - elif (bim['SheathingThickness'] >= 0.625) and (bim['SheathingThickness'] <= 1.0): - rda = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + if 
((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)): + RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) + elif ((BIM['SheathingThickness'] >= 0.59375) and (BIM['SheathingThickness'] <= 1.0)): + RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) + else: + # year <= 1986 + if ((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)): + RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND) + elif ((BIM['SheathingThickness'] >= 0.625) and (BIM['SheathingThickness'] <= 1.0)): + RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND) # Roof-Wall Connection (RWC) # IRC 2015 @@ -154,10 +146,10 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # will assume that if classified as HazardProneRegion, then enhanced # connection would be used. if year > 2015: - if bim['HazardProneRegion']: - rwc = 'strap' # Strap + if BIM['HazardProneRegion']: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # IRC 2000-2009 # In Section R802.11.1 Uplift Resistance of the NJ 2009 IRC, roof # assemblies which are subject to wind uplift pressures of 20 pounds per @@ -175,10 +167,10 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # 110 mph begin to generate pressures of 20 psf in high pressure zones of # the roof. Thus 110 mph is used as the critical velocity. elif year > 1992: - if bim['V_ult'] > 110: - rwc = 'strap' # Strap + if BIM['V_ult'] > 110: + RWC = 'strap' # Strap else: - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # CABO 1989 and earlier # There is no mention of straps or enhanced tie-downs in the CABO codes # older than 1992, and there is no description of these adoptions in IBHS @@ -191,7 +183,7 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # buildings are toe nails before 1992. 
else: # year <= 1992 - rwc = 'tnail' # Toe-nail + RWC = 'tnail' # Toe-nail # Shutters # IRC 2000-2015: @@ -204,7 +196,7 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # and are able to resist component and cladding loads; # Earlier IRC editions provide similar rules. if year > 2000: - shutters = bim['WindBorneDebris'] + shutters = BIM['WindBorneDebris'] # CABO: # Based on Human Subjects Data, roughly 45% of houses built in the 1980s # and 1990s had entries that implied they had shutters on at some or all of @@ -214,10 +206,12 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # 1992 to 1995, 33/74 entries (44.59%) with shutters # 1986 to 1992, 36/79 entries (45.57%) with shutters # 1983 to 1986, 19/44 entries (43.18%) with shutters - elif bim['WindBorneDebris']: - shutters = random.random() < 0.45 else: - shutters = False + # year <= 2000 + if BIM['WindBorneDebris']: + shutters = random.random() < 0.45 + else: + shutters = False # Garage # As per IRC 2015: @@ -234,54 +228,57 @@ def WSF_config(bim: dict) -> str: # noqa: C901 # WindBorneDebris (and therefore do not have any strength requirements) that # are older than 30 years are considered to be weak, whereas those from the # last 30 years are considered to be standard. 
- if bim['Garage'] == -1: + if BIM['Garage'] == -1: # no garage data, using the default "standard" garage = 'std' - shutters = 0 # HAZUS ties standard garage to w/o shutters - elif year > 2000: - if shutters: - if bim['Garage'] < 1: - garage = 'no' + shutters = 0 # HAZUS ties standard garage to w/o shutters + else: + if year > 2000: + if shutters: + if BIM['Garage'] < 1: + garage = 'no' + else: + garage = 'sup' # SFBC 1994 + shutters = 1 # HAZUS ties SFBC 1994 to with shutters else: - garage = 'sup' # SFBC 1994 - shutters = 1 # HAZUS ties SFBC 1994 to with shutters - elif bim['Garage'] < 1: - garage = 'no' # None - else: - garage = 'std' # Standard - shutters = 0 # HAZUS ties standard garage to w/o shutters - elif year > (datetime.datetime.now(tz=datetime.timezone.utc).year - 30): - if bim['Garage'] < 1: - garage = 'no' # None + if BIM['Garage'] < 1: + garage = 'no' # None + else: + garage = 'std' # Standard + shutters = 0 # HAZUS ties standard garage to w/o shutters + elif year > (datetime.datetime.now().year - 30): + if BIM['Garage'] < 1: + garage = 'no' # None + else: + garage = 'std' # Standard + shutters = 0 # HAZUS ties standard garage to w/o shutters else: - garage = 'std' # Standard - shutters = 0 # HAZUS ties standard garage to w/o shutters - elif bim['Garage'] < 1: - garage = 'no' # None - else: - garage = 'wkd' # Weak - shutters = 0 # HAZUS ties weak garage to w/o shutters + # year <= current year - 30 + if BIM['Garage'] < 1: + garage = 'no' # None + else: + garage = 'wkd' # Weak + shutters = 0 # HAZUS ties weak garage to w/o shutters # extend the BIM dictionary - bim.update( - { - 'SecondaryWaterResistance': swr, - 'RoofDeckAttachmentW': rda, - 'RoofToWallConnection': rwc, - 'Shutters': shutters, - 'Garage': garage, - } - ) + BIM.update(dict( + SecondaryWaterResistance = SWR, + RoofDeckAttachmentW = RDA, + RoofToWallConnection = RWC, + Shutters = shutters, + Garage = garage + )) # building configuration tag - return ( - f"W.SF." 
- f"{int(min(bim['NumberOfStories'], 2))}." - f"{bim['RoofShape']}." - f"{int(swr)}." - f"{rda}." - f"{rwc}." - f"{garage}." - f"{int(shutters)}." - f"{int(bim['TerrainRoughness'])}" - ) + bldg_config = f"W.SF." \ + f"{int(min(BIM['NumberOfStories'],2))}." \ + f"{BIM['RoofShape']}." \ + f"{int(SWR)}." \ + f"{RDA}." \ + f"{RWC}." \ + f"{garage}." \ + f"{int(shutters)}." \ + f"{int(BIM['TerrainRoughness'])}" + + return bldg_config + diff --git a/pyproject.toml b/pyproject.toml index c08979782..3f313d44a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,6 @@ [tool.ruff] line-length = 85 +exclude = ["rulesets"] [tool.ruff.lint] # Enable all known categories @@ -22,14 +23,13 @@ max-bool-expr=5 "pelicun/tests/*" = ["D", "N802", "SLF001", "PLR2004", "PLR6301"] "pelicun/resources/auto/*" = ["ALL"] "pelicun/tools/HDF_to_CSV.py" = ["ALL"] -"pelicun/tests/dl_calculation/rulesets/*" = ["N999"] [tool.ruff.format] quote-style = "single" [tool.codespell] ignore-words = ["ignore_words.txt"] -skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*"] +skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*"] [tool.mypy] ignore_missing_imports = true From 84d773c4d34087ce040efc68b004dbcb7c72c6f6 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 05:56:52 -0700 Subject: [PATCH 15/27] Revert changes to `custom_pop.py`. 
--- pelicun/tests/dl_calculation/e9/custom_pop.py | 145 +++++++----------- pyproject.toml | 5 +- 2 files changed, 63 insertions(+), 87 deletions(-) diff --git a/pelicun/tests/dl_calculation/e9/custom_pop.py b/pelicun/tests/dl_calculation/e9/custom_pop.py index fe99b9a47..35c30d679 100644 --- a/pelicun/tests/dl_calculation/e9/custom_pop.py +++ b/pelicun/tests/dl_calculation/e9/custom_pop.py @@ -1,50 +1,21 @@ -# Copyright (c) 2018 Leland Stanford Junior University -# Copyright (c) 2018 The Regents of the University of California -# -# This file is part of pelicun. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# -# You should have received a copy of the BSD 3-Clause License along with -# pelicun. If not, see . +# -*- coding: utf-8 -*- -import pandas as pd +# Contributors: +# Stevan Gavrilovic +# Adam Zsarnoczay +# Example 9 Tsunami, Seaside +import pandas as pd -def auto_populate(aim: dict) -> tuple: +def auto_populate(AIM): """ - Populates the DL model for tsunami example using custom fragility functions. + Populates the DL model for tsunami example using custom fragility functions - Assumptions - ----------- - * Everything relevant to auto-population is provided in the - Building Information Model (AIM). - * The information expected in the AIM file is described in the - parse_AIM method. + Assumptions: + - Everything relevant to auto-population is provided in the Buiding + Information Model (AIM). + - The information expected in the AIM file is described in the parse_AIM + method. Parameters ---------- @@ -58,29 +29,30 @@ def auto_populate(aim: dict) -> tuple: Contains the extended AIM data. DL_ap: dictionary Contains the auto-populated loss model. 
- """ + # parse the AIM data - # print(AIM) # Look in the AIM.json file to see what you can access here + #print(AIM) # Look in the AIM.json file to see what you can access here # extract the General Information - gi = aim['GeneralInformation'] - + GI = AIM.get('GeneralInformation', None) + # GI_ap is the 'extended AIM data - this case no extended AIM data - gi_ap = gi.copy() - + GI_ap = GI.copy() + # Get the number of Stories - note the column heading needs to be exactly # 'NumberOfStories'. - nstories = gi_ap.get('NumberOfStories', None) + nstories = GI_ap.get('NumberOfStories', None) if nstories is None: + print("NumberOfStories attribute missing from AIM file.") return None, None, None - - # Get the fragility tag according to some building attribute; the - # NumberOfStories in this case. The fragility tag needs to be unique, i.e., - # one tag for each fragility group. The fragility tag has to match the file - # name of the json file in the 'ComponentDataFolder' (without the .json + + # Get the fragility tag according to some building attribute; the + # NumberOfStories in this case. The fragility tag needs to be unique, i.e., + # one tag for each fragility group. 
The fragility tag has to match the file + # name of the json file in the 'ComponentDataFolder' (without the .json # suffix) - + if nstories == 1: fragility_function_tag = 'building.1' elif nstories == 2: @@ -88,39 +60,40 @@ def auto_populate(aim: dict) -> tuple: elif nstories >= 3: fragility_function_tag = 'building.3andAbove' else: - print(f'Invalid number of storeys provided: {nstories}') # noqa: T201 + print(f"Invalid number of storeys provided: {nstories}") # prepare the component assignment - comp = pd.DataFrame( - {f'{fragility_function_tag}': ['ea', 1, 1, 1, 'N/A']}, - index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], - ).T - + CMP = pd.DataFrame( + {f'{fragility_function_tag}': [ 'ea', 1, 1, 1, 'N/A']}, + index = [ 'Units','Location','Direction','Theta_0','Family'] + ).T + # Populate the DL_ap - dl_ap = { - 'Asset': { - 'ComponentAssignmentFile': 'CMP_QNT.csv', - 'ComponentDatabase': 'None', - 'ComponentDatabasePath': 'CustomDLDataFolder/damage_Tsunami.csv', - }, - 'Damage': {'DamageProcess': 'None'}, - 'Demands': {}, - 'Losses': { - 'BldgRepair': { - 'ConsequenceDatabase': 'None', - 'ConsequenceDatabasePath': ( - 'CustomDLDataFolder/loss_repair_Tsunami.csv' - ), - 'MapApproach': 'User Defined', - 'MapFilePath': 'CustomDLDataFolder/loss_map.csv', - 'DecisionVariables': { - 'Cost': True, - 'Carbon': False, - 'Energy': False, - 'Time': False, - }, + DL_ap = { + "Asset": { + "ComponentAssignmentFile": "CMP_QNT.csv", + "ComponentDatabase": "None", + "ComponentDatabasePath": "CustomDLDataFolder/damage_Tsunami.csv" + }, + "Damage": { + "DamageProcess": "None" + }, + "Demands": { + }, + "Losses": { + "BldgRepair": { + "ConsequenceDatabase": "None", + "ConsequenceDatabasePath": "CustomDLDataFolder/loss_repair_Tsunami.csv", + "MapApproach": "User Defined", + "MapFilePath": "CustomDLDataFolder/loss_map.csv", + "DecisionVariables": { + "Cost": True, + "Carbon": False, + "Energy": False, + "Time": False + } + } } - }, - } + } - return gi_ap, dl_ap, comp + 
return GI_ap, DL_ap, CMP diff --git a/pyproject.toml b/pyproject.toml index 3f313d44a..31ac44313 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,9 @@ [tool.ruff] line-length = 85 -exclude = ["rulesets"] +exclude = [ + "rulesets", + "pelicun/tests/dl_calculation/e9/custom_pop.py" +] [tool.ruff.lint] # Enable all known categories From 3b3f5da4cca676aedd1223003e5e9ff601f516ce Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 05:58:38 -0700 Subject: [PATCH 16/27] Remove timestamp. --- pelicun/tests/validation/inactive/3d_interpolation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pelicun/tests/validation/inactive/3d_interpolation.py b/pelicun/tests/validation/inactive/3d_interpolation.py index c98a58ccf..cab15c04c 100644 --- a/pelicun/tests/validation/inactive/3d_interpolation.py +++ b/pelicun/tests/validation/inactive/3d_interpolation.py @@ -36,7 +36,6 @@ """ With this code we verify that scipy's `RegularGridInterpolator` does what we expect. -Created: `Sat Jun 1 03:07:28 PM PDT 2024`. """ From bf3d07594ec95e991a10794e4eb657967d421b0d Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 06:06:18 -0700 Subject: [PATCH 17/27] Revert changes to inactive scripts. --- .../validation/inactive/3d_interpolation.py | 3 +- .../inactive/pandas_convert_speed.py | 53 +++++++++++-------- pyproject.toml | 1 + 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/pelicun/tests/validation/inactive/3d_interpolation.py b/pelicun/tests/validation/inactive/3d_interpolation.py index cab15c04c..44e8caa81 100644 --- a/pelicun/tests/validation/inactive/3d_interpolation.py +++ b/pelicun/tests/validation/inactive/3d_interpolation.py @@ -66,12 +66,13 @@ interpolated_value = interp_func(test_values) # Compare output with the exact value. 
-data = pd.DataFrame( +df = pd.DataFrame( { 'exact': x1 + np.sqrt(x2) + np.sin(x3), 'interpolated': interpolated_value, } ) +print(df) # Note: This does work with a 2D case, and it could scale to more than # 3 dimensions. diff --git a/pelicun/tests/validation/inactive/pandas_convert_speed.py b/pelicun/tests/validation/inactive/pandas_convert_speed.py index 7f2493556..82e25414b 100644 --- a/pelicun/tests/validation/inactive/pandas_convert_speed.py +++ b/pelicun/tests/validation/inactive/pandas_convert_speed.py @@ -38,40 +38,51 @@ import pandas as pd -def benchmark() -> None: +def benchmark(): # Create a large DataFrame - data = pd.DataFrame(np.random.rand(1000000, 10), columns=list('ABCDEFGHIJ')) + df = pd.DataFrame(np.random.rand(1000000, 10), columns=list('ABCDEFGHIJ')) # Measure time for df.to_dict(orient='list') - time.time() - data.to_dict(orient='list') - time.time() + start_time = time.time() + df.to_dict(orient='list') + end_time = time.time() + print(f'Time taken with to_dict(orient="list"): {end_time - start_time} seconds') # Measure time for dictionary comprehension - time.time() - {col: data[col].tolist() for col in data.columns} - time.time() + start_time = time.time() + {col: df[col].tolist() for col in df.columns} + end_time = time.time() + print( + f'Time taken with dictionary comprehension: {end_time - start_time} seconds' + ) # Measure time for dictionary comprehension without to list - time.time() - {col: data[col] for col in data.columns} - time.time() + start_time = time.time() + {col: df[col] for col in df.columns} + end_time = time.time() + print( + f'Time taken with dictionary comprehension ' + f'without to_list: {end_time - start_time} seconds' + ) # Measure time for .values - time.time() - data.to_numpy() - time.time() + start_time = time.time() + df.values + end_time = time.time() + print(f'Time taken with .values: {end_time - start_time} seconds') # Measure time for using df.to_numpy() - time.time() - data_array = data.to_numpy() - {col: 
data_array[:, i].tolist() for i, col in enumerate(data.columns)} - time.time() + start_time = time.time() + data_array = df.to_numpy() + {col: data_array[:, i].tolist() for i, col in enumerate(df.columns)} + end_time = time.time() + print(f'Time taken with df.to_numpy(): {end_time - start_time} seconds') # Measure time for using df.to_dict() - time.time() - data.to_dict() - time.time() + start_time = time.time() + df.to_dict() + end_time = time.time() + print(f'Time taken with df.to_dict(): {end_time - start_time} seconds') if __name__ == '__main__': diff --git a/pyproject.toml b/pyproject.toml index 31ac44313..e4016cae2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ max-bool-expr=5 "pelicun/tests/*" = ["D", "N802", "SLF001", "PLR2004", "PLR6301"] "pelicun/resources/auto/*" = ["ALL"] "pelicun/tools/HDF_to_CSV.py" = ["ALL"] +"pelicun/tests/validation/inactive/*" = ["T201", "B018", "ANN", "PD"] [tool.ruff.format] quote-style = "single" From 7a82e5574b54a08e2c054bc58935faadf0943262 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 06:13:38 -0700 Subject: [PATCH 18/27] Ignore `custom_pop.py` spell checking. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e4016cae2..080e2ae16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ quote-style = "single" [tool.codespell] ignore-words = ["ignore_words.txt"] -skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*"] +skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py"] [tool.mypy] ignore_missing_imports = true From 977d90e11f7b6977219ff38e4e6de3ed83b1f5c1 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 06:13:59 -0700 Subject: [PATCH 19/27] Update argument name to `aim`. 
--- pelicun/tests/dl_calculation/e9/custom_pop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pelicun/tests/dl_calculation/e9/custom_pop.py b/pelicun/tests/dl_calculation/e9/custom_pop.py index 35c30d679..bdb47947d 100644 --- a/pelicun/tests/dl_calculation/e9/custom_pop.py +++ b/pelicun/tests/dl_calculation/e9/custom_pop.py @@ -7,7 +7,7 @@ import pandas as pd -def auto_populate(AIM): +def auto_populate(aim): """ Populates the DL model for tsunami example using custom fragility functions @@ -35,7 +35,7 @@ def auto_populate(AIM): #print(AIM) # Look in the AIM.json file to see what you can access here # extract the General Information - GI = AIM.get('GeneralInformation', None) + GI = aim.get('GeneralInformation', None) # GI_ap is the 'extended AIM data - this case no extended AIM data GI_ap = GI.copy() From cda0c00c72e88a3fe6aaccf3cb022a7ec6bcd312 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 06:15:13 -0700 Subject: [PATCH 20/27] Fix typo. --- pelicun/uq.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pelicun/uq.py b/pelicun/uq.py index fae3a747f..8eac4c774 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -1289,7 +1289,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. """ @@ -1350,7 +1350,7 @@ def inverse_transform(self, sample_size: int) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. """ @@ -1624,7 +1624,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. 
Parameters @@ -1723,7 +1723,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. Parameters @@ -1827,7 +1827,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. Parameters @@ -1977,7 +1977,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ Evaluate the inverse CDF. - Usses inverse probability integral transformation on the + Uses inverse probability integral transformation on the provided values. Parameters From b2980f4e248a6eb37ab237a62522c6b8631d4ba4 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Thu, 24 Oct 2024 10:49:13 -0700 Subject: [PATCH 21/27] Add back comment. --- pelicun/tests/basic/test_demand_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pelicun/tests/basic/test_demand_model.py b/pelicun/tests/basic/test_demand_model.py index 2d4be8f12..020084313 100644 --- a/pelicun/tests/basic/test_demand_model.py +++ b/pelicun/tests/basic/test_demand_model.py @@ -597,6 +597,7 @@ def test__get_required_demand_type( def test__assemble_required_demand_data( self, assessment_instance: Assessment ) -> None: + # Utility demand case: two demands are required damage_model = assessment_instance.damage cmp_set = {'testing.component'} damage_model.load_model_parameters( From 4c8dc48ce0c10886880194a1a533bbe5b872532d Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 10:00:54 -0700 Subject: [PATCH 22/27] Reverse changes to files in `SimCenterDBDL/`. The changes will be introduced again in a PR at the corresponding repository. 
--- .../SimCenterDBDL/damage_DB_FEMA_P58_2nd.json | 146 +++++++++--------- .../damage_DB_Hazus_EQ_bldg.json | 18 +-- .../damage_DB_Hazus_EQ_story.json | 6 +- .../loss_repair_DB_FEMA_P58_2nd.json | 146 +++++++++--------- 4 files changed, 158 insertions(+), 158 deletions(-) diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json b/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json index f09ce6df4..e19590231 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_FEMA_P58_2nd.json @@ -71,7 +71,7 @@ "D.50.92 - Other Electrical Systems" ] }, - "E - Equipment and furnishings": { + "E - Equipments and furnishings": { "E.20 - Furnishings": [ "E.20.22 - Movable Furnishings" ] @@ -8812,7 +8812,7 @@ "LS2": { "DS2": { "Description": "Structural damage but live load capacity remains intact. Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." } }, "LS3": { @@ -8838,7 +8838,7 @@ "LS2": { "DS2": { "Description": "Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." } }, "LS3": { @@ -10147,7 +10147,7 @@ }, "D.20.21.011a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10167,7 +10167,7 @@ }, "D.20.21.011b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10187,7 +10187,7 @@ }, "D.20.21.012a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10207,7 +10207,7 @@ }, "D.20.21.012b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10227,7 +10227,7 @@ }, "D.20.21.013a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10247,7 +10247,7 @@ }, "D.20.21.013b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10261,7 +10261,7 @@ }, "D.20.21.014a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10281,7 +10281,7 @@ }, "D.20.21.014b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10295,7 +10295,7 @@ }, "D.20.21.021a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10315,7 +10315,7 @@ }, "D.20.21.022a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10335,7 +10335,7 @@ }, "D.20.21.023a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10355,7 +10355,7 @@ }, "D.20.21.023b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10375,7 +10375,7 @@ }, "D.20.21.024a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10395,7 +10395,7 @@ }, "D.20.21.024b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10415,7 +10415,7 @@ }, "D.20.22.011a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10435,7 +10435,7 @@ }, "D.20.22.011b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10455,7 +10455,7 @@ }, "D.20.22.012a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10475,7 +10475,7 @@ }, "D.20.22.012b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10495,7 +10495,7 @@ }, "D.20.22.013a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10515,7 +10515,7 @@ }, "D.20.22.013b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10529,7 +10529,7 @@ }, "D.20.22.014a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10549,7 +10549,7 @@ }, "D.20.22.014b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10563,7 +10563,7 @@ }, "D.20.22.021a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10583,7 +10583,7 @@ }, "D.20.22.022a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10603,7 +10603,7 @@ }, "D.20.22.023a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10623,7 +10623,7 @@ }, "D.20.22.023b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10643,7 +10643,7 @@ }, "D.20.22.024a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10663,7 +10663,7 @@ }, "D.20.22.024b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10683,7 +10683,7 @@ }, "D.20.31.011b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10703,7 +10703,7 @@ }, "D.20.31.012b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10723,7 +10723,7 @@ }, "D.20.31.013b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10737,7 +10737,7 @@ }, "D.20.31.014b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10751,7 +10751,7 @@ }, "D.20.31.021a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10765,7 +10765,7 @@ }, "D.20.31.021b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10785,7 +10785,7 @@ }, "D.20.31.022a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10799,7 +10799,7 @@ }, "D.20.31.022b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10819,7 +10819,7 @@ }, "D.20.31.023a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10833,7 +10833,7 @@ }, "D.20.31.023b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10847,7 +10847,7 @@ }, "D.20.31.024a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10861,7 +10861,7 @@ }, "D.20.31.024b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10875,7 +10875,7 @@ }, "D.20.51.011a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10895,7 +10895,7 @@ }, "D.20.51.011b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10915,7 +10915,7 @@ }, "D.20.51.012a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10935,7 +10935,7 @@ }, "D.20.51.012b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10955,7 +10955,7 @@ }, "D.20.51.013a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10975,7 +10975,7 @@ }, "D.20.51.013b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -10989,7 +10989,7 @@ }, "D.20.51.014a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11009,7 +11009,7 @@ }, "D.20.51.014b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11023,7 +11023,7 @@ }, "D.20.51.021a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11043,7 +11043,7 @@ }, "D.20.51.021b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11057,7 +11057,7 @@ }, "D.20.51.022a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11077,7 +11077,7 @@ }, "D.20.51.023a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11097,7 +11097,7 @@ }, "D.20.51.023b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11117,7 +11117,7 @@ }, "D.20.51.024a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11137,7 +11137,7 @@ }, "D.20.51.024b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11157,7 +11157,7 @@ }, "D.20.61.011a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11177,7 +11177,7 @@ }, "D.20.61.011b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11197,7 +11197,7 @@ }, "D.20.61.012a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11217,7 +11217,7 @@ }, "D.20.61.012b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11237,7 +11237,7 @@ }, "D.20.61.013a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11257,7 +11257,7 @@ }, "D.20.61.013b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11271,7 +11271,7 @@ }, "D.20.61.014a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11291,7 +11291,7 @@ }, "D.20.61.014b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11305,7 +11305,7 @@ }, "D.20.61.021a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11325,7 +11325,7 @@ }, "D.20.61.022a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11345,7 +11345,7 @@ }, "D.20.61.023a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11365,7 +11365,7 @@ }, "D.20.61.023b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11385,7 +11385,7 @@ }, "D.20.61.024a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -11405,7 +11405,7 @@ }, "D.20.61.024b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "False", "LimitStates": { @@ -16573,4 +16573,4 @@ } } } -} +} \ No newline at end of file diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json index 337d89947..c92fd79c6 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_bldg.json @@ -3770,7 +3770,7 @@ }, "STR.URM.L.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. 
Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3801,7 +3801,7 @@ }, "STR.URM.L.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. 
Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3832,7 +3832,7 @@ }, "STR.URM.M.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Mid-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -3863,7 +3863,7 @@ }, "STR.URM.M.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Mid-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7878,7 +7878,7 @@ }, "LF.URM.L.LC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. 
The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. 
Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7909,7 +7909,7 @@ }, "LF.URM.L.PC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. 
small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. 
\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7940,7 +7940,7 @@ }, "LF.URM.M.LC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Mid-Rise, Low-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. 
The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. 
Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. 
In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -7971,7 +7971,7 @@ }, "LF.URM.M.PC": { "Description": "Lifeline Facilities, Unreinforced Masonry Bearing Walls, Mid-Rise, Pre-Code", - "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. 
small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. \nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Lifeline Facility damage functions are expressed in terms of an equivalent value of PGA for efficient evaluation of buildings that are components of utility and transportation systems. Only structural damage functions are developed based on PGA, since structural damage is considered the most appropriate measure of damage for utility and transportation system facilities. Median values of equivalent-PGA fragility curves are based on median values of spectral displacement of the damage state of interest and an assumed demand spectrum shape that relates spectral response to PGA. As such, median values of equivalent PGA are very sensitive to the shape assumed for the demand spectrum. Spectrum shape is influenced by earthquake source (i.e., WUS vs. CEUS attenuation functions), earthquake magnitude (e.g., large vs. small magnitude events), distance from source to site, site conditions (e.g., soil vs. rock), and effective damping, which varies based on building properties and earthquake duration (e.g., short, moderate, or long duration). These fragility curves were developed for a single set of spectrum shape factors (a reference spectrum), and a formula is provided for modifying damage state medians to approximate other spectrum shapes. The reference spectrum represents ground shaking of a large magnitude (i.e., M7.0) western United States (WUS) earthquake for soil sites (e.g., Site Class D) at site-to-source distances of 15 km or greater. 
\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nMid-Rise Building with 4-7 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -8188,4 +8188,4 @@ } } } -} +} \ No newline at end of file diff --git a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json index 83ef5139a..bd78db139 100644 --- a/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json +++ b/pelicun/resources/SimCenterDBDL/damage_DB_Hazus_EQ_story.json @@ -1662,7 +1662,7 @@ }, "STR.URM.LC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Low-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. 
The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level Approximate Basis:\n1941-1975 construction in UBC Seismic Zone 2B, NEHRP Map Area 5\nPost-1941 construction in UBC Seismic Zone 2A, NEHRP Map Area 4\nPost-1975 construction in UBC Seismic Zone 1, NEHRP Map Area 2/3", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -1693,7 +1693,7 @@ }, "STR.URM.PC": { "Description": "Structural, Unreinforced Masonry Bearing Walls, Low-Rise, Pre-Code", - "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistorey buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", + "Comments": "Structural components represent the structural system in the building.\nStructural System: These buildings include structural elements that vary depending on the building\u2019s age and, to a lesser extent, its geographic location. In buildings built before 1900, the majority of floor and roof construction consists of wood sheathing supported by wood framing. In large multistory buildings, the floors are cast- in-place concrete supported by the unreinforced masonry walls and/or steel or concrete interior framing. In unreinforced masonry constructed built after 1950 outside California, wood floors usually have plywood rather than board sheathing. In regions of lower seismicity, buildings of this type constructed more recently can include floor and roof framing that consists of metal deck and concrete fill supported by steel framing elements. The perimeter walls, and possibly some interior walls, are unreinforced masonry. The walls may or may not be anchored to the diaphragms. Ties between the walls and diaphragms are more common for the bearing walls than for walls that are parallel to the floor framing. Roof ties are usually less common and more erratically spaced than those at the floor levels. 
Interior partitions that interconnect the floors and roof can reduce diaphragm displacements.\nLow-Rise Building with 1-3 stories.\nDesign Level: Approximate Basis: UBC Seismic Zone 0, NEHRP Map Area 1.\nPre-1941 construction in all other UBC and NEHRP areas.\nPre-Code damage functions are appropriate for modeling older buildings that were not designed for earthquake load, regardless of where they are located in the United States.", "SuggestedComponentBlockSize": "1 EA", "RoundUpToIntegerQuantity": "True", "LimitStates": { @@ -2050,4 +2050,4 @@ } } } -} +} \ No newline at end of file diff --git a/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json b/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json index db558ac72..a715e45b2 100644 --- a/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json +++ b/pelicun/resources/SimCenterDBDL/loss_repair_DB_FEMA_P58_2nd.json @@ -71,7 +71,7 @@ "D.50.92 - Other Electrical Systems" ] }, - "E - Equipments and furnishings": { + "E - Equipment and furnishings": { "E.20 - Furnishings": [ "E.20.22 - Movable Furnishings" ] @@ -7278,7 +7278,7 @@ }, "DS2": { "Description": "Structural damage but live load capacity remains intact. Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." }, "DS3": { "Description": "Loss of live load capacity. Connection and or weld fracture.", @@ -7299,7 +7299,7 @@ }, "DS2": { "Description": "Buckling of steel, weld cracking.", - "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finsihes." + "RepairAction": "Removal and replacement of damaged components. Field repair of damage (such as welding). Repair finishes." }, "DS3": { "Description": "Loss of live load capacity. 
Connection and or weld fracture.", @@ -8453,7 +8453,7 @@ }, "D.20.21.011a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8470,7 +8470,7 @@ }, "D.20.21.011b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8487,7 +8487,7 @@ }, "D.20.21.012a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. 
Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8504,7 +8504,7 @@ }, "D.20.21.012b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8521,7 +8521,7 @@ }, "D.20.21.013a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8538,7 +8538,7 @@ }, "D.20.21.013b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8551,7 +8551,7 @@ }, "D.20.21.014a": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8568,7 +8568,7 @@ }, "D.20.21.014b": { "Description": "Cold or Hot Potable - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8581,7 +8581,7 @@ }, "D.20.21.021a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC A or B, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8598,7 +8598,7 @@ }, "D.20.21.022a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC C, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8615,7 +8615,7 @@ }, "D.20.21.023a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8632,7 +8632,7 @@ }, "D.20.21.023b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8649,7 +8649,7 @@ }, "D.20.21.024a": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), PIPING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8666,7 +8666,7 @@ }, "D.20.21.024b": { "Description": "Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F (OSPHD or sim), BRACING FRAGILITY", - "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Potable water. Costing based upon 1000 ft segments.\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8683,7 +8683,7 @@ }, "D.20.22.011a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8700,7 +8700,7 @@ }, "D.20.22.011b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8717,7 +8717,7 @@ }, "D.20.22.012a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8734,7 +8734,7 @@ }, "D.20.22.012b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8751,7 +8751,7 @@ }, "D.20.22.013a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8768,7 +8768,7 @@ }, "D.20.22.013b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8781,7 +8781,7 @@ }, "D.20.22.014a": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8798,7 +8798,7 @@ }, "D.20.22.014b": { "Description": "Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8811,7 +8811,7 @@ }, "D.20.22.021a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8828,7 +8828,7 @@ }, "D.20.22.022a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8845,7 +8845,7 @@ }, "D.20.22.023a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8862,7 +8862,7 @@ }, "D.20.22.023b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. 
Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8879,7 +8879,7 @@ }, "D.20.22.024a": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8896,7 +8896,7 @@ }, "D.20.22.024b": { "Description": "Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Heating water. Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Heating water. 
Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8913,7 +8913,7 @@ }, "D.20.31.011b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8930,7 +8930,7 @@ }, "D.20.31.012b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8947,7 +8947,7 @@ }, "D.20.31.013b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8960,7 +8960,7 @@ }, "D.20.31.014b": { "Description": "Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8973,7 +8973,7 @@ }, "D.20.31.021a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -8986,7 +8986,7 @@ }, "D.20.31.021b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC A,B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9003,7 +9003,7 @@ }, "D.20.31.022a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9016,7 +9016,7 @@ }, "D.20.31.022b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9033,7 +9033,7 @@ }, "D.20.31.023a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9046,7 +9046,7 @@ }, "D.20.31.023b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9059,7 +9059,7 @@ }, "D.20.31.024a": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9072,7 +9072,7 @@ }, "D.20.31.024b": { "Description": "Sanitary Waste Piping - Cast Iron w/bell and spigot couplings, SDC D,E,F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe\nConstruction Quality: Special Regulation (e.g. OSHPD) for piping installations\nSeismic Installation Conditions: SDC D, E or F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9085,7 +9085,7 @@ }, "D.20.51.011a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9102,7 +9102,7 @@ }, "D.20.51.011b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9119,7 +9119,7 @@ }, "D.20.51.012a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9136,7 +9136,7 @@ }, "D.20.51.012b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9153,7 +9153,7 @@ }, "D.20.51.013a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9170,7 +9170,7 @@ }, "D.20.51.013b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9183,7 +9183,7 @@ }, "D.20.51.014a": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9200,7 +9200,7 @@ }, "D.20.51.014b": { "Description": "Chilled Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9213,7 +9213,7 @@ }, "D.20.51.021a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9230,7 +9230,7 @@ }, "D.20.51.021b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9243,7 +9243,7 @@ }, "D.20.51.022a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9260,7 +9260,7 @@ }, "D.20.51.023a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. 
Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9277,7 +9277,7 @@ }, "D.20.51.023b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9294,7 +9294,7 @@ }, "D.20.51.024a": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9311,7 +9311,7 @@ }, "D.20.51.024b": { "Description": "Chilled Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9328,7 +9328,7 @@ }, "D.20.61.011a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9345,7 +9345,7 @@ }, "D.20.61.011b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC A or B, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9362,7 +9362,7 @@ }, "D.20.61.012a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9379,7 +9379,7 @@ }, "D.20.61.012b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC C, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9396,7 +9396,7 @@ }, "D.20.61.013a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9413,7 +9413,7 @@ }, "D.20.61.013b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. 
Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9426,7 +9426,7 @@ }, "D.20.61.014a": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9443,7 +9443,7 @@ }, "D.20.61.014b": { "Description": "Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe 2.5 inches in diameter or less\nConstruction Quality: Special Regulations (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9456,7 +9456,7 @@ }, "D.20.61.021a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC A or B, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC A or B (no seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9473,7 +9473,7 @@ }, "D.20.61.022a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC C, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC C (low seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9490,7 +9490,7 @@ }, "D.20.61.023a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9507,7 +9507,7 @@ }, "D.20.61.023b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. 
Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Normal\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9524,7 +9524,7 @@ }, "D.20.61.024a": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), PIPING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -9541,7 +9541,7 @@ }, "D.20.61.024b": { "Description": "Steam Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F (OSHPD or sim), BRACING FRAGILITY", - "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. 
OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence separately. Cost includes allowance for MEP relocation to perform work.", + "Comments": "Costing based upon 1000 ft segments of pipe, pipe greater than 2.5 inches in diameter\nConstruction Quality: Special Regulation (e.g. OSHPD) for Piping Installations\nSeismic Installation Conditions: SDC D, E, F (high seismic design)\nNotes: Consequence is for piping only. Enter floor wetting consequence seperately. Cost includes allowance for MEP relocation to perform work.", "SuggestedComponentBlockSize": "1000 LF", "RoundUpToIntegerQuantity": "NO", "ControllingDemand": "Damage Quantity", @@ -14006,4 +14006,4 @@ } } } -} +} \ No newline at end of file From 1f36b1089af9e5e0de8fe8ad9d035e53a9f587c8 Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 10:27:35 -0700 Subject: [PATCH 23/27] Bug fix & restore print statements - Rename lingering `structureType` to `structure_type`. Likely came from code merging operation. - Restore `print` statements. - Fix typos. - Update Ruff configuration. --- pelicun/resources/auto/Hazus_Earthquake_IM.py | 155 +++++++++++------- pyproject.toml | 4 +- 2 files changed, 101 insertions(+), 58 deletions(-) diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py index d2265cbcf..123d5cc5a 100644 --- a/pelicun/resources/auto/Hazus_Earthquake_IM.py +++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py @@ -74,7 +74,7 @@ # Convert common length units -def convertUnits(value, unit_in, unit_out): # noqa: N802 +def convertUnits(value, unit_in, unit_out): """ Convert units. 
""" @@ -96,19 +96,23 @@ def convertUnits(value, unit_in, unit_out): # noqa: N802 'mile': mile, } if (unit_in not in aval_types) or (unit_out not in aval_types): + print( + f'The unit {unit_in} or {unit_out} ' + f'are used in auto_population but not supported' + ) return None return value * scale_map[unit_in] / scale_map[unit_out] -def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 +def convertBridgeToHAZUSclass(aim): # noqa: C901 # TODO: replace labels in AIM with standard CamelCase versions structure_type = aim['BridgeClass'] # if ( - # type(structureType) == str - # and len(structureType) > 3 - # and structureType[:3] == "HWB" - # and 0 < int(structureType[3:]) - # and 29 > int(structureType[3:]) + # type(structure_type) == str + # and len(structure_type) > 3 + # and structure_type[:3] == "HWB" + # and 0 < int(structure_type[3:]) + # and 29 > int(structure_type[3:]) # ): # return AIM["bridge_class"] state = aim['StateCode'] @@ -136,7 +140,7 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB4' - elif structureType in list(range(101, 107)): + elif structure_type in list(range(101, 107)): if not seismic: if state != 6: bridge_class = 'HWB5' @@ -145,19 +149,19 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB7' - elif structureType in [205, 206]: + elif structure_type in [205, 206]: if not seismic: bridge_class = 'HWB8' else: bridge_class = 'HWB9' - elif structureType in list(range(201, 207)): + elif structure_type in list(range(201, 207)): if not seismic: bridge_class = 'HWB10' else: bridge_class = 'HWB11' - elif structureType in list(range(301, 307)): + elif structure_type in list(range(301, 307)): if not seismic: if len_max_span >= 20: if state != 6: @@ -172,7 +176,7 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB14' - elif structureType in list(range(402, 411)): + elif structure_type in list(range(402, 411)): if not seismic: if len_max_span >= 20: 
bridge_class = 'HWB15' @@ -183,7 +187,7 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB16' - elif structureType in list(range(501, 507)): + elif structure_type in list(range(501, 507)): if not seismic: if state != 6: bridge_class = 'HWB17' @@ -192,13 +196,13 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 else: bridge_class = 'HWB19' - elif structureType in [605, 606]: + elif structure_type in [605, 606]: if not seismic: bridge_class = 'HWB20' else: bridge_class = 'HWB21' - elif structureType in list(range(601, 608)): + elif structure_type in list(range(601, 608)): if not seismic: bridge_class = 'HWB22' else: @@ -210,7 +214,7 @@ def convertBridgeToHAZUSclass(aim): # noqa: C901, N802 return bridge_class -def convertTunnelToHAZUSclass(aim) -> str: # noqa: N802 +def convertTunnelToHAZUSclass(aim) -> str: if ('Bored' in aim['ConstructType']) or ('Drilled' in aim['ConstructType']): return 'HTU1' elif ('Cut' in aim['ConstructType']) or ('Cover' in aim['ConstructType']): @@ -220,7 +224,7 @@ def convertTunnelToHAZUSclass(aim) -> str: # noqa: N802 return 'HTU2' -def convertRoadToHAZUSclass(aim) -> str: # noqa: N802 +def convertRoadToHAZUSclass(aim) -> str: if aim['RoadType'] in ['Primary', 'Secondary']: return 'HRD1' @@ -247,7 +251,7 @@ def convert_story_rise(structure_type, stories): 'Missing "NumberOfStories" information, ' 'cannot infer `rise` attribute of archetype' ) - raise ValueError(msg) + raise ValueError(msg) # noqa: B904 if structure_type == 'RM1': rise = 'L' if stories <= 3 else 'M' @@ -352,10 +356,10 @@ def auto_populate(aim): # noqa: C901 lf = f'LF.{bt}.{dl}' # fmt: off - comp = pd.DataFrame( # noqa - {f'{lf}': ['ea', 1, 1, 1, 'N/A']}, # noqa - index = ['Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp = pd.DataFrame( + {f'{lf}': ['ea', 1, 1, 1, 'N/A']}, # noqa: E241 + index = ['Units','Location','Direction','Theta_0','Family'] # noqa: E231, E251 + ).T # fmt: on # if needed, add 
components to simulate damage from ground failure @@ -366,11 +370,11 @@ def auto_populate(aim): # noqa: C901 fg_gf_v = f'GF.V.{foundation_type}' # fmt: off - comp_gf = pd.DataFrame( # noqa - {f'{fg_gf_h}':[ 'ea', 1, 1, 1, 'N/A'], # noqa - f'{fg_gf_v}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp_gf = pd.DataFrame( + {f'{fg_gf_h}':[ 'ea', 1, 1, 1, 'N/A'], # noqa: E201, E231, E241 + f'{fg_gf_v}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa: E201, E231, E241 + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251 + ).T # fmt: on comp = pd.concat([comp, comp_gf], axis=0) @@ -415,11 +419,11 @@ def auto_populate(aim): # noqa: C901 gi_ap['BridgeHazusClass'] = bt # fmt: off - comp = pd.DataFrame( # noqa - {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa - f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp = pd.DataFrame( + {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa: E201, E241 + f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241, F541 + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251 + ).T # fmt: on dl_ap = { @@ -448,11 +452,11 @@ def auto_populate(aim): # noqa: C901 gi_ap['TunnelHazusClass'] = tt # fmt: off - comp = pd.DataFrame( # noqa - {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa - f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp = pd.DataFrame( + {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A'], # noqa: E201, E241 + f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241, F541 + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251 + ).T # fmt: on dl_ap = { @@ -480,10 +484,10 @@ def auto_populate(aim): # noqa: C901 gi_ap['RoadHazusClass'] = rt # fmt: off - comp = pd.DataFrame( # noqa - {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 
1, 1, 'N/A']}, # noqa - index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp = pd.DataFrame( + {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E231, E241 + index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251 + ).T # fmt: on dl_ap = { @@ -555,26 +559,44 @@ def auto_populate(aim): # noqa: C901 missing, if the pipe is smaller than or equal to 20 inches, the material is Cast Iron (CI) otherwise the pipe material is steel. - If the material is steel (ST), either based on user specified - input or the assumption due to the lack of the user-input, the year - that the pipe is constructed define the flexibility status per HAZUS - instructions. If the pipe is built in 1935 or after, it is, the pipe - is Ductile Steel (DS), and otherwise it is Brittle Steel (BS). - If the pipe is missing construction year and is built by steel, - we assume consevatively that the pipe is brittle (i.e., BS) + If the material is steel (ST), either based on user + specified input or the assumption due to the lack of the + user-input, the year that the pipe is constructed define + the flexibility status per HAZUS instructions. If the pipe + is built in 1935 or after, it is, the pipe is Ductile + Steel (DS), and otherwise it is Brittle Steel (BS). + If the pipe is missing construction year and is built + by steel, we assume consevatively that the pipe is brittle + (i.e., BS) """ if pipe_material is None: if pipe_diameter > 20 * 0.0254: # 20 inches in meter + print( + f'Asset {asset_name} is missing material. Material is\ + assumed to be Cast Iron' + ) pipe_material = 'CI' else: + print( + f'Asset {asset_name} is missing material. 
Material is ' + f'assumed to be Steel (ST)' + ) pipe_material = 'ST' if pipe_material == 'ST': if (pipe_construction_year is not None) and ( pipe_construction_year >= 1935 ): + print( + f'Asset {asset_name} has material of "ST" is assumed to be\ + Ductile Steel' + ) pipe_material = 'DS' else: + print( + f'Asset {asset_name} has material of "ST" is assumed to be ' + f'Brittle Steel' + ) pipe_material = 'BS' pipe_flexibility = pipe_material_map.get(pipe_material, 'missing') @@ -607,12 +629,12 @@ def auto_populate(aim): # noqa: C901 # Define performance model # fmt: off - comp = pd.DataFrame( # noqa - {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'], # noqa - f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'], # noqa - 'aggregate': ['ea', location_string, '0', 1, 'N/A']}, # noqa - index = ['Units','Location','Direction','Theta_0','Family'] # noqa - ).T # noqa + comp = pd.DataFrame( + {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'], + f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'], + 'aggregate': ['ea', location_string, '0', 1, 'N/A']}, + index = ['Units','Location','Direction','Theta_0','Family'] # noqa: E231, E251 + ).T # fmt: on # Set up the demand cloning configuration for the pipe @@ -629,7 +651,7 @@ def auto_populate(aim): # noqa: C901 ) demand_cloning_config = {} for edp in response_data.columns: - tag, location, direction = edp + tag, location, direction = edp # noqa: F841 demand_cloning_config['-'.join(edp)] = [ f'{tag}-{x}-{direction}' @@ -711,15 +733,36 @@ def auto_populate(aim): # noqa: C901 raise ValueError(msg) if tank_location == 'AG' and tank_material == 'C': + print( + f'The tank {asset_name} is Above Ground (i.e., AG), but \ + the material type is Concrete ("C"). Tank type "C" is not \ + defined for AG tanks. 
The tank is assumed to be Steel ("S")' + ) tank_material = 'S' if tank_location == 'AG' and tank_material == 'W': + print( + f'The tank {asset_name} is Above Ground (i.e., AG), but \ + the material type is Wood ("W"). Tank type "W" is not \ + defined for AG tanks. The tank is assumed to be Steel ("S")' + ) tank_material = 'S' if tank_location == 'B' and tank_material == 'S': + print( + f'The tank {asset_name} is buried (i.e., B), but the\ + material type is Steel ("S"). \ + Tank type "S" is not defined for\ + B tanks. The tank is assumed to be Concrete ("C")' + ) tank_material = 'C' if tank_location == 'B' and tank_material == 'W': + print( + f'The tank {asset_name} is buried (i.e., B), but the\ + material type is Wood ("W"). Tank type "W" is not defined \ + for B tanks. The tank is assumed to be Concrete ("C")' + ) tank_material = 'C' if tank_anchored == 1: @@ -728,7 +771,7 @@ def auto_populate(aim): # noqa: C901 tank_anchored = 0 cur_tank_cmp_line = tank_cmp_lines[ - (tank_location, tank_material, tank_anchored) + tank_location, tank_material, tank_anchored ] comp = pd.DataFrame( diff --git a/pyproject.toml b/pyproject.toml index 080e2ae16..6350470c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ max-bool-expr=5 [tool.ruff.lint.per-file-ignores] "pelicun/tests/*" = ["D", "N802", "SLF001", "PLR2004", "PLR6301"] -"pelicun/resources/auto/*" = ["ALL"] +"pelicun/resources/auto/*" = ['PLR', 'T', 'N', 'ANN', 'D', 'PTH', 'INP', 'DOC', 'RET', 'TD'] "pelicun/tools/HDF_to_CSV.py" = ["ALL"] "pelicun/tests/validation/inactive/*" = ["T201", "B018", "ANN", "PD"] @@ -33,7 +33,7 @@ quote-style = "single" [tool.codespell] ignore-words = ["ignore_words.txt"] -skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py"] +skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py", "*/SimCenterDBDL/*"] 
[tool.mypy] ignore_missing_imports = true From 3544f039efd286b118e4f4a512635e1e66ecd01d Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 11:28:02 -0700 Subject: [PATCH 24/27] Reverse changes in auto population files used in tests. - auto_HU_NJ.py - auto_HU_LA.py - custom_pop.py Renaming of variable `AIM` to `aim` was preserved. --- pelicun/tests/dl_calculation/e7/auto_HU_NJ.py | 110 +++++++++--------- pelicun/tests/dl_calculation/e8/auto_HU_LA.py | 101 ++++++++-------- pelicun/tests/dl_calculation/e9/custom_pop.py | 87 +++++++------- pyproject.toml | 5 +- 4 files changed, 149 insertions(+), 154 deletions(-) diff --git a/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py index c92024c93..512893152 100644 --- a/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py +++ b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py @@ -1,4 +1,5 @@ -# # noqa: N999 +# -*- coding: utf-8 -*- +# # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -43,16 +44,16 @@ # Meredith Lockhead # Tracy Kijewski-Correa -from __future__ import annotations - import pandas as pd + +from WindMetaVarRulesets import parse_BIM from BuildingClassRulesets import building_class +from FloodAssmRulesets import Assm_config from FloodClassRulesets import FL_config from WindCECBRulesets import CECB_config from WindCERBRulesets import CERB_config from WindMECBRulesets import MECB_config from WindMERBRulesets import MERB_config -from WindMetaVarRulesets import parse_BIM from WindMHRulesets import MH_config from WindMLRIRulesets import MLRI_config from WindMLRMRulesets import MLRM_config @@ -65,12 +66,12 @@ from WindWSFRulesets import WSF_config -def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: # noqa: C901 +def auto_populate(aim): """ - Populates the DL model for hurricane assessments in Atlantic County, NJ. 
+ Populates the DL model for hurricane assessments in Atlantic County, NJ Assumptions: - - Everything relevant to auto-population is provided in the Building + - Everything relevant to auto-population is provided in the Buiding Information Model (AIM). - The information expected in the AIM file is described in the parse_AIM method. @@ -84,66 +85,63 @@ def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: # noqa: C901 Returns ------- GI_ap: dictionary - Contains the extended AIM data. + Containes the extended AIM data. DL_ap: dictionary Contains the auto-populated loss model. - - Raises - ------ - ValueError - If the building class is not recognized. - """ + # extract the General Information - gi = aim.get('GeneralInformation') + GI = aim.get('GeneralInformation', None) # parse the GI data - gi_ap = parse_BIM(gi, location='NJ', hazards=['wind', 'inundation']) + GI_ap = parse_BIM(GI, location="NJ", hazards=['wind', 'inundation']) # identify the building class - bldg_class = building_class(gi_ap, hazard='wind') + bldg_class = building_class(GI_ap, hazard='wind') # prepare the building configuration string if bldg_class == 'WSF': - bldg_config = WSF_config(gi_ap) + bldg_config = WSF_config(GI_ap) elif bldg_class == 'WMUH': - bldg_config = WMUH_config(gi_ap) + bldg_config = WMUH_config(GI_ap) elif bldg_class == 'MSF': - bldg_config = MSF_config(gi_ap) + bldg_config = MSF_config(GI_ap) elif bldg_class == 'MMUH': - bldg_config = MMUH_config(gi_ap) + bldg_config = MMUH_config(GI_ap) elif bldg_class == 'MLRM': - bldg_config = MLRM_config(gi_ap) + bldg_config = MLRM_config(GI_ap) elif bldg_class == 'MLRI': - bldg_config = MLRI_config(gi_ap) + bldg_config = MLRI_config(GI_ap) elif bldg_class == 'MERB': - bldg_config = MERB_config(gi_ap) + bldg_config = MERB_config(GI_ap) elif bldg_class == 'MECB': - bldg_config = MECB_config(gi_ap) + bldg_config = MECB_config(GI_ap) elif bldg_class == 'CECB': - bldg_config = CECB_config(gi_ap) + bldg_config = CECB_config(GI_ap) 
elif bldg_class == 'CERB': - bldg_config = CERB_config(gi_ap) + bldg_config = CERB_config(GI_ap) elif bldg_class == 'SPMB': - bldg_config = SPMB_config(gi_ap) + bldg_config = SPMB_config(GI_ap) elif bldg_class == 'SECB': - bldg_config = SECB_config(gi_ap) + bldg_config = SECB_config(GI_ap) elif bldg_class == 'SERB': - bldg_config = SERB_config(gi_ap) + bldg_config = SERB_config(GI_ap) elif bldg_class == 'MH': - bldg_config = MH_config(gi_ap) + bldg_config = MH_config(GI_ap) else: - msg = ( - f'Building class {bldg_class} not recognized by the ' - f'auto-population routine.' + raise ValueError( + f"Building class {bldg_class} not recognized by the " + f"auto-population routine." ) - raise ValueError(msg) # prepare the flood rulesets - fld_config = FL_config(gi_ap) + fld_config = FL_config(GI_ap) + + # prepare the assembly loss compositions + hu_assm, fl_assm = Assm_config(GI_ap) # prepare the component assignment - comp = pd.DataFrame( + CMP = pd.DataFrame( { f'{bldg_config}': ['ea', 1, 1, 1, 'N/A'], f'{fld_config}': ['ea', 1, 1, 1, 'N/A'], @@ -151,28 +149,28 @@ def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: # noqa: C901 index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T - dl_ap = { - 'Asset': { - 'ComponentAssignmentFile': 'CMP_QNT.csv', - 'ComponentDatabase': 'Hazus Hurricane', - 'NumberOfStories': f"{gi_ap['NumberOfStories']}", - 'OccupancyType': f"{gi_ap['OccupancyClass']}", - 'PlanArea': f"{gi_ap['PlanArea']}", + DL_ap = { + "Asset": { + "ComponentAssignmentFile": "CMP_QNT.csv", + "ComponentDatabase": "Hazus Hurricane", + "NumberOfStories": f"{GI_ap['NumberOfStories']}", + "OccupancyType": f"{GI_ap['OccupancyClass']}", + "PlanArea": f"{GI_ap['PlanArea']}", }, - 'Damage': {'DamageProcess': 'Hazus Hurricane'}, - 'Demands': {}, - 'Losses': { - 'BldgRepair': { - 'ConsequenceDatabase': 'Hazus Hurricane', - 'MapApproach': 'Automatic', - 'DecisionVariables': { - 'Cost': True, - 'Carbon': False, - 'Energy': False, - 'Time': False, + 
"Damage": {"DamageProcess": "Hazus Hurricane"}, + "Demands": {}, + "Losses": { + "BldgRepair": { + "ConsequenceDatabase": "Hazus Hurricane", + "MapApproach": "Automatic", + "DecisionVariables": { + "Cost": True, + "Carbon": False, + "Energy": False, + "Time": False, }, } }, } - return gi_ap, dl_ap, comp + return GI_ap, DL_ap, CMP diff --git a/pelicun/tests/dl_calculation/e8/auto_HU_LA.py b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py index e8c5d575f..74edf9458 100644 --- a/pelicun/tests/dl_calculation/e8/auto_HU_LA.py +++ b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py @@ -1,4 +1,5 @@ -# # noqa: N999 +# -*- coding: utf-8 -*- +# # Copyright (c) 2018 Leland Stanford Junior University # Copyright (c) 2018 The Regents of the University of California # @@ -32,7 +33,7 @@ # # You should have received a copy of the BSD 3-Clause License along with # this file. If not, see . - +# # Contributors: # Adam Zsarnóczay # Kuanshi Zhong @@ -43,109 +44,101 @@ # Meredith Lockhead # Tracy Kijewski-Correa - -from __future__ import annotations - -import contextlib - import pandas as pd -from BldgClassRulesets import building_class + from MetaVarRulesets import parse_BIM -from WindWMUHRulesets import WMUH_config +from BldgClassRulesets import building_class from WindWSFRulesets import WSF_config +from WindWMUHRulesets import WMUH_config -def auto_populate(aim: dict) -> tuple[dict, dict, pd.DataFrame]: +def auto_populate(aim): """ - Populates the DL model for hurricane assessments in Atlantic County, NJ. + Populates the DL model for hurricane assessments in Atlantic County, NJ Assumptions: - - Everything relevant to auto-population is provided in the Building + - Everything relevant to auto-population is provided in the Buiding Information Model (AIM). - The information expected in the AIM file is described in the parse_GI method. 
Parameters ---------- - AIM: dictionary + aim: dictionary Contains the information that is available about the asset and will be used to auto-popualate the damage and loss model. Returns ------- GI_ap: dictionary - Contains the extended BIM data. + Containes the extended BIM data. DL_ap: dictionary Contains the auto-populated loss model. - - Raises - ------ - ValueError - If the building class is not recognized. - """ + # extract the General Information - gi = aim.get('GeneralInformation') + GI = aim.get('GeneralInformation', None) # parse the GI data - gi_ap = parse_BIM( - gi, - location='LA', + GI_ap = parse_BIM( + GI, + location="LA", hazards=[ 'wind', ], ) # identify the building class - bldg_class = building_class(gi_ap, hazard='wind') - gi_ap.update({'HazusClassW': bldg_class}) + bldg_class = building_class(GI_ap, hazard='wind') + GI_ap.update({'HazusClassW': bldg_class}) # prepare the building configuration string if bldg_class == 'WSF': - bldg_config = WSF_config(gi_ap) + bldg_config = WSF_config(GI_ap) elif bldg_class == 'WMUH': - bldg_config = WMUH_config(gi_ap) + bldg_config = WMUH_config(GI_ap) else: - msg = ( - f'Building class {bldg_class} not recognized by the ' - f'auto-population routine.' + raise ValueError( + f"Building class {bldg_class} not recognized by the " + f"auto-population routine." 
) - raise ValueError(msg) # drop keys of internal variables from GI_ap dict internal_vars = ['V_ult', 'V_asd'] for var in internal_vars: - with contextlib.suppress(KeyError): - gi_ap.pop(var) + try: + GI_ap.pop(var) + except KeyError: + pass # prepare the component assignment - comp = pd.DataFrame( + CMP = pd.DataFrame( {f'{bldg_config}': ['ea', 1, 1, 1, 'N/A']}, index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], ).T - dl_ap = { - 'Asset': { - 'ComponentAssignmentFile': 'CMP_QNT.csv', - 'ComponentDatabase': 'Hazus Hurricane', - 'NumberOfStories': f"{gi_ap['NumberOfStories']}", - 'OccupancyType': f"{gi_ap['OccupancyClass']}", - 'PlanArea': f"{gi_ap['PlanArea']}", + DL_ap = { + "Asset": { + "ComponentAssignmentFile": "CMP_QNT.csv", + "ComponentDatabase": "Hazus Hurricane", + "NumberOfStories": f"{GI_ap['NumberOfStories']}", + "OccupancyType": f"{GI_ap['OccupancyClass']}", + "PlanArea": f"{GI_ap['PlanArea']}", }, - 'Damage': {'DamageProcess': 'Hazus Hurricane'}, - 'Demands': {}, - 'Losses': { - 'BldgRepair': { - 'ConsequenceDatabase': 'Hazus Hurricane', - 'MapApproach': 'Automatic', - 'DecisionVariables': { - 'Cost': True, - 'Carbon': False, - 'Energy': False, - 'Time': False, + "Damage": {"DamageProcess": "Hazus Hurricane"}, + "Demands": {}, + "Losses": { + "BldgRepair": { + "ConsequenceDatabase": "Hazus Hurricane", + "MapApproach": "Automatic", + "DecisionVariables": { + "Cost": True, + "Carbon": False, + "Energy": False, + "Time": False, }, } }, } - return gi_ap, dl_ap, comp + return GI_ap, DL_ap, CMP diff --git a/pelicun/tests/dl_calculation/e9/custom_pop.py b/pelicun/tests/dl_calculation/e9/custom_pop.py index bdb47947d..47727249c 100644 --- a/pelicun/tests/dl_calculation/e9/custom_pop.py +++ b/pelicun/tests/dl_calculation/e9/custom_pop.py @@ -7,19 +7,21 @@ import pandas as pd + def auto_populate(aim): """ Populates the DL model for tsunami example using custom fragility functions - Assumptions: - - Everything relevant to auto-population is provided 
in the Buiding - Information Model (AIM). - - The information expected in the AIM file is described in the parse_AIM - method. + Assumptions + ----------- + * Everything relevant to auto-population is provided in the + Buiding Information Model (AIM). + * The information expected in the AIM file is described in the + parse_AIM method. Parameters ---------- - AIM: dictionary + aim: dictionary Contains the information that is available about the asset and will be used to auto-populate the damage and loss model. @@ -32,27 +34,27 @@ def auto_populate(aim): """ # parse the AIM data - #print(AIM) # Look in the AIM.json file to see what you can access here + # print(aim) # Look in the AIM.json file to see what you can access here # extract the General Information GI = aim.get('GeneralInformation', None) - + # GI_ap is the 'extended AIM data - this case no extended AIM data GI_ap = GI.copy() - + # Get the number of Stories - note the column heading needs to be exactly # 'NumberOfStories'. nstories = GI_ap.get('NumberOfStories', None) if nstories is None: print("NumberOfStories attribute missing from AIM file.") return None, None, None - - # Get the fragility tag according to some building attribute; the - # NumberOfStories in this case. The fragility tag needs to be unique, i.e., - # one tag for each fragility group. The fragility tag has to match the file - # name of the json file in the 'ComponentDataFolder' (without the .json + + # Get the fragility tag according to some building attribute; the + # NumberOfStories in this case. The fragility tag needs to be unique, i.e., + # one tag for each fragility group. 
The fragility tag has to match the file + # name of the json file in the 'ComponentDataFolder' (without the .json # suffix) - + if nstories == 1: fragility_function_tag = 'building.1' elif nstories == 2: @@ -64,36 +66,35 @@ def auto_populate(aim): # prepare the component assignment CMP = pd.DataFrame( - {f'{fragility_function_tag}': [ 'ea', 1, 1, 1, 'N/A']}, - index = [ 'Units','Location','Direction','Theta_0','Family'] - ).T - + {f'{fragility_function_tag}': ['ea', 1, 1, 1, 'N/A']}, + index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'], + ).T + # Populate the DL_ap DL_ap = { - "Asset": { - "ComponentAssignmentFile": "CMP_QNT.csv", - "ComponentDatabase": "None", - "ComponentDatabasePath": "CustomDLDataFolder/damage_Tsunami.csv" - }, - "Damage": { - "DamageProcess": "None" - }, - "Demands": { - }, - "Losses": { - "BldgRepair": { - "ConsequenceDatabase": "None", - "ConsequenceDatabasePath": "CustomDLDataFolder/loss_repair_Tsunami.csv", - "MapApproach": "User Defined", - "MapFilePath": "CustomDLDataFolder/loss_map.csv", - "DecisionVariables": { - "Cost": True, - "Carbon": False, - "Energy": False, - "Time": False - } - } + "Asset": { + "ComponentAssignmentFile": "CMP_QNT.csv", + "ComponentDatabase": "None", + "ComponentDatabasePath": "CustomDLDataFolder/damage_Tsunami.csv", + }, + "Damage": {"DamageProcess": "None"}, + "Demands": {}, + "Losses": { + "BldgRepair": { + "ConsequenceDatabase": "None", + "ConsequenceDatabasePath": ( + "CustomDLDataFolder/loss_repair_Tsunami.csv" + ), + "MapApproach": "User Defined", + "MapFilePath": "CustomDLDataFolder/loss_map.csv", + "DecisionVariables": { + "Cost": True, + "Carbon": False, + "Energy": False, + "Time": False, + }, } - } + }, + } return GI_ap, DL_ap, CMP diff --git a/pyproject.toml b/pyproject.toml index 6350470c2..87a5def0a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,13 +27,16 @@ max-bool-expr=5 "pelicun/resources/auto/*" = ['PLR', 'T', 'N', 'ANN', 'D', 'PTH', 'INP', 'DOC', 'RET', 'TD'] 
"pelicun/tools/HDF_to_CSV.py" = ["ALL"] "pelicun/tests/validation/inactive/*" = ["T201", "B018", "ANN", "PD"] +"pelicun/tests/dl_calculation/e7/auto_HU_NJ.py" = ["ALL"] +"pelicun/tests/dl_calculation/e8/auto_HU_LA.py" = ["ALL"] +"pelicun/tests/dl_calculation/e9/custom_pop.py" = ["ALL"] [tool.ruff.format] quote-style = "single" [tool.codespell] ignore-words = ["ignore_words.txt"] -skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py", "*/SimCenterDBDL/*"] +skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py", "*/SimCenterDBDL/*", "auto_HU_NJ.py", "auto_HU_LA.py", "custom_pop.py"] [tool.mypy] ignore_missing_imports = true From b201aafa0b6f5d7723adb913a03061cbe05bc48d Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 11:42:57 -0700 Subject: [PATCH 25/27] Use a more descriptive variable name. --- pelicun/uq.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pelicun/uq.py b/pelicun/uq.py index 8eac4c774..cde2ba3df 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -841,8 +841,8 @@ def fit_distribution_to_sample( # noqa: C901 # There is nothing to gain from a time-consuming optimization if.. # the number of samples is too small - small_n_samples = 3 - if (n_samples < small_n_samples) or ( + min_sample_size_for_optimization = 3 + if (n_samples < min_sample_size_for_optimization) or ( # there are no truncation or detection limits involved np.all(np.isnan(tr_limits)) and np.all(np.isnan(det_limits)) ): From 12d0f434aa1d9956052515f1a4c595d4a427211c Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 11:48:07 -0700 Subject: [PATCH 26/27] Rename `raw_samples` to `raw_sample`. 
--- pelicun/model/demand_model.py | 2 +- pelicun/uq.py | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pelicun/model/demand_model.py b/pelicun/model/demand_model.py index 3b87a4032..65dc3e189 100644 --- a/pelicun/model/demand_model.py +++ b/pelicun/model/demand_model.py @@ -748,7 +748,7 @@ def get_filter_mask( ) demand_theta, demand_rho = uq.fit_distribution_to_sample( - raw_samples=demand_sample.to_numpy().T, + raw_sample=demand_sample.to_numpy().T, distribution=cal_df.loc[:, 'Family'].values, # type: ignore censored_count=censored_count, detection_limits=cal_df.loc[ # type: ignore diff --git a/pelicun/uq.py b/pelicun/uq.py index cde2ba3df..56933dac2 100644 --- a/pelicun/uq.py +++ b/pelicun/uq.py @@ -695,7 +695,7 @@ def _neg_log_likelihood( # noqa: C901 def fit_distribution_to_sample( # noqa: C901 - raw_samples: np.ndarray, + raw_sample: np.ndarray, distribution: str | list[str], truncation_limits: tuple[float, float] = (np.nan, np.nan), censored_count: int = 0, @@ -715,7 +715,7 @@ def fit_distribution_to_sample( # noqa: C901 Parameters ---------- - raw_samples: float ndarray + raw_sample: float ndarray Raw data that serves as the basis of estimation. The number of samples equals the number of columns and each row introduces a new feature. In other words: a list of sample lists is expected where each sample list @@ -724,7 +724,7 @@ def fit_distribution_to_sample( # noqa: C901 Defines the target probability distribution type. Different types of distributions can be mixed by providing a list rather than a single value. Each element of the list corresponds to one of the features in - the raw_samples. + the raw_sample. truncation_limits: float ndarray, optional, default: [None, None] Lower and/or upper truncation limits for the specified distributions. 
A two-element vector can be used for a univariate case, while two lists @@ -775,7 +775,7 @@ def fit_distribution_to_sample( # noqa: C901 If NaN values are produced during standard normal space transformation """ - samples = np.atleast_2d(raw_samples) + samples = np.atleast_2d(raw_sample) tr_limits = np.atleast_2d(truncation_limits) det_limits = np.atleast_2d(detection_limits) dist_list = np.atleast_1d(distribution) @@ -2007,7 +2007,7 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: class EmpiricalRandomVariable(RandomVariable): """Empirical random variable.""" - __slots__: list[str] = ['_raw_samples'] + __slots__: list[str] = ['_raw_sample'] def __init__( self, @@ -2032,7 +2032,7 @@ def __init__( msg = f'{self.distribution} RVs do not support truncation' raise NotImplementedError(msg) - self._raw_samples = np.atleast_1d(theta) + self._raw_sample = np.atleast_1d(theta) def inverse_transform(self, values: np.ndarray) -> np.ndarray: """ @@ -2057,14 +2057,14 @@ def inverse_transform(self, values: np.ndarray) -> np.ndarray: normalized positions. """ - s_ids = (values * len(self._raw_samples)).astype(int) - return self._raw_samples[s_ids] + s_ids = (values * len(self._raw_sample)).astype(int) + return self._raw_sample[s_ids] class CoupledEmpiricalRandomVariable(UtilityRandomVariable): """Coupled empirical random variable.""" - __slots__: list[str] = ['_raw_samples'] + __slots__: list[str] = ['_raw_sample'] def __init__( self, @@ -2113,7 +2113,7 @@ def __init__( msg = f'{self.distribution} RVs do not support truncation' raise NotImplementedError(msg) - self._raw_samples = np.atleast_1d(theta) + self._raw_sample = np.atleast_1d(theta) def inverse_transform(self, sample_size: int) -> np.ndarray: """ @@ -2138,9 +2138,9 @@ def inverse_transform(self, sample_size: int) -> np.ndarray: dataset. 
""" - raw_sample_count = len(self._raw_samples) + raw_sample_count = len(self._raw_sample) new_sample = np.tile( - self._raw_samples, int(sample_size / raw_sample_count) + 1 + self._raw_sample, int(sample_size / raw_sample_count) + 1 ) return new_sample[:sample_size] From d36706ab5d2ea5d9f00b3b1099d7216e7c35758c Mon Sep 17 00:00:00 2001 From: John Vouvakis Manousakis Date: Mon, 28 Oct 2024 11:53:07 -0700 Subject: [PATCH 27/27] Adjust configuration and code checking workflow. - Completely ignore certain ruleset files (instead of only ignoring checking certain Ruff rules). - Add check for formatting in `run_checks.sh`. --- pyproject.toml | 5 ++--- run_checks.sh | 11 ++++++++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 87a5def0a..8c9835cdd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,6 +2,8 @@ line-length = 85 exclude = [ "rulesets", + "pelicun/tests/dl_calculation/e7/auto_HU_NJ.py", + "pelicun/tests/dl_calculation/e8/auto_HU_LA.py", "pelicun/tests/dl_calculation/e9/custom_pop.py" ] @@ -27,9 +29,6 @@ max-bool-expr=5 "pelicun/resources/auto/*" = ['PLR', 'T', 'N', 'ANN', 'D', 'PTH', 'INP', 'DOC', 'RET', 'TD'] "pelicun/tools/HDF_to_CSV.py" = ["ALL"] "pelicun/tests/validation/inactive/*" = ["T201", "B018", "ANN", "PD"] -"pelicun/tests/dl_calculation/e7/auto_HU_NJ.py" = ["ALL"] -"pelicun/tests/dl_calculation/e8/auto_HU_LA.py" = ["ALL"] -"pelicun/tests/dl_calculation/e9/custom_pop.py" = ["ALL"] [tool.ruff.format] quote-style = "single" diff --git a/run_checks.sh b/run_checks.sh index d4a20e227..7b5487bec 100755 --- a/run_checks.sh +++ b/run_checks.sh @@ -9,12 +9,21 @@ if [ $? -ne 0 ]; then exit 1 fi +# Check formatting with ruff +echo "Checking formatting with 'ruff format --diff'." +echo +ruff format --diff +if [ $? -ne 0 ]; then + echo "ruff format failed." + exit 1 +fi + # Run ruff for linting echo "Linting with 'ruff check --fix'." echo ruff check --fix --output-format concise if [ $? 
-ne 0 ]; then - echo "ruff failed." + echo "ruff check failed." exit 1 fi