Spell check source code.
ioannis-vm committed May 19, 2024
1 parent 528718d commit 8b7562e
Showing 13 changed files with 51 additions and 51 deletions.
8 changes: 4 additions & 4 deletions pelicun/base.py
@@ -455,7 +455,7 @@ def emit_warnings(self):

def warn(self, msg):
"""
- Add an emit a warning immediatelly.
+ Add an emit a warning immediately.
Parameters
----------
@@ -634,7 +634,7 @@ def update_vals(update, primary, update_path, primary_path):
f'{primary_path}["{key}"] = {primary[key]}. '
f'Please revise {update_path}["{key}"].'
)
- # With both being dictionaries, we recurse.
+ # With both being dictionaries, we use recursion.
update_vals(
update[key],
primary[key],
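The corrected comment above refers to a recursive merge of nested dictionaries. For context, a generic sketch of that pattern is shown below; it is not pelicun's `update_vals` (which, as the message above suggests, also reports conflicting entries), and the configuration keys are made up for the example.

```python
def fill_missing(update: dict, primary: dict) -> None:
    # Illustrative sketch only: keys missing from `update` are filled
    # in from `primary`, and nested dictionaries are merged by a
    # recursive call, mirroring the comment above.
    for key, value in primary.items():
        if key not in update:
            update[key] = value
        elif isinstance(update[key], dict) and isinstance(value, dict):
            fill_missing(update[key], value)


user_cfg = {'Sampling': {'SampleSize': 500}}
defaults = {'Sampling': {'SampleSize': 1000, 'Seed': 42}, 'Verbose': False}
fill_missing(user_cfg, defaults)
# user_cfg is now:
# {'Sampling': {'SampleSize': 500, 'Seed': 42}, 'Verbose': False}
```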
@@ -1076,7 +1076,7 @@ def str2bool(v):
value, an error is raised indicating that a boolean value was
expected.
"""
- # courtesy of Maxim @ stackoverflow
+ # courtesy of Maxim @ Stackoverflow

if isinstance(v, bool):
return v
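The `str2bool` helper credited above ("courtesy of Maxim @ Stackoverflow") follows a widely used argparse idiom. A sketch of that idiom is shown here for reference; the exact set of accepted tokens in pelicun may differ.

```python
import argparse


def str2bool(v):
    # Common Stack Overflow pattern referenced in the comment above;
    # the token lists shown are the usual ones and are an assumption,
    # not necessarily pelicun's exact list.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
```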
@@ -1523,7 +1523,7 @@ def stringterpolation(
) -> Callable[[np.ndarray], np.ndarray]:
"""
Turns a string of specially formatted arguments into a multilinear
- interpolating funciton.
+ interpolating function.
Parameters
----------
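For context on `stringterpolation`, the docstring above describes turning a specially formatted string into a multilinear (piecewise-linear) interpolating function. A minimal sketch of that idea is shown below; the delimiter convention is inferred from a test message later in this commit (the X values appear after the `|` symbol), so the actual pelicun format may differ.

```python
from typing import Callable

import numpy as np


def string_to_interpolator(spec: str) -> Callable[[np.ndarray], np.ndarray]:
    # Illustrative sketch, not pelicun's implementation: values before
    # the '|' are taken as Y, values after it as X.
    y_text, x_text = spec.split('|')
    y = np.array([float(v) for v in y_text.split(',')])
    x = np.array([float(v) for v in x_text.split(',')])
    return lambda values: np.interp(values, x, y)


f = string_to_interpolator('0.0,1.0|0.0,10.0')
print(f(np.array([2.5, 5.0])))  # [0.25 0.5 ]
```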
6 changes: 3 additions & 3 deletions pelicun/db.py
@@ -2113,7 +2113,7 @@ def create_Hazus_HU_fragility_db(
This method was developed to add a json file with metadata
accompanying `damage_DB_SimCenter_Hazus_HU_bldg.csv`. That file
- contains fragility curves fitted to Hazus Hurricane data relaetd
+ contains fragility curves fitted to Hazus Hurricane data related
to the Hazus Hurricane Technical Manual v4.2.
Parameters
@@ -2213,7 +2213,7 @@ def create_Hazus_HU_fragility_db(
masonry_reinforcing = {
'1': 'Has masonry reinforcing.',
'0': 'No masonry reinforcing.',
- 'null': 'Unknown information on masonry reinfocing.',
+ 'null': 'Unknown information on masonry reinforcing.',
}

roof_frame_type = {
@@ -2222,7 +2222,7 @@
}

wind_debris_environment = {
- 'A': 'Residentiao/commercial wind debris environment.',
+ 'A': 'Residential/commercial wind debris environment.',
'B': 'Wind debris environment varies by direction.',
'C': 'Residential wind debris environment.',
'D': 'No wind debris environment.',
44 changes: 22 additions & 22 deletions pelicun/model/damage_model.py
@@ -238,7 +238,7 @@ def calculate(
.astype('int64')
)
else:
- # Otherwise asume 1.00 for the number of blocks and
+ # Otherwise assume 1.00 for the number of blocks and
# initialize `component_blocks` using the columns of `cmp_sample`.
component_blocks = pd.DataFrame(
np.ones(self._asmnt.asset.cmp_sample.shape[1]),
@@ -425,7 +425,7 @@ def __init__(self, assessment: Assessment):

def _load_model_parameters(self, data):
"""
- Load model parameters from a dataframe, extending those
+ Load model parameters from a DataFrame, extending those
already available. Parameters already defined take precedence,
i.e. redefinitions of parameters are ignored.
@@ -506,14 +506,14 @@ def _get_pg_batches(
The method takes as input the block_batch_size, which
specifies the maximum number of blocks per batch. The method
first checks if performance groups have been defined in the
- cmp_marginal_params dataframe, and if so, it uses the 'Blocks'
+ cmp_marginal_params DataFrame, and if so, it uses the 'Blocks'
column as the performance group information. If performance
groups have not been defined in cmp_marginal_params, the
- method uses the cmp_sample dataframe to define the performance
+ method uses the cmp_sample DataFrame to define the performance
groups, with each performance group having a single block.
The method then checks if the performance groups are available
- in the damage parameters dataframe, and removes any
+ in the damage parameters DataFrame, and removes any
performance groups that are not found in the damage
parameters. The method then groups the performance groups
based on the locations and directions of the components, and
@@ -522,14 +522,14 @@
batches of size specified by block_batch_size and assigns a
batch number to each group. Finally, the method groups the
performance groups by batch number, component, location, and
- direction, and returns a dataframe that shows the number of
+ direction, and returns a DataFrame that shows the number of
blocks for each batch.
Parameters
----------
component_blocks: pd.DataFrame
- Dataframe containing a singe column, `Blocks`, which lists
+ DataFrame containing a singe column, `Blocks`, which lists
the number of blocks for each (`cmp`-`loc`-`dir`-`uid`).
block_batch_size: int
Maximum number of components in each batch.
@@ -543,7 +543,7 @@ def _get_pg_batches(
A DataFrame indexed by batch number, component identifier,
location, direction, and unique ID, with a column
indicating the number of blocks assigned to each
- batch. This dataframe facilitates the management and
+ batch. This DataFrame facilitates the management and
execution of damage assessment tasks by grouping
components into manageable batches based on the specified
block batch size.
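The docstring above outlines how performance groups are collected into batches whose total number of blocks stays within `block_batch_size`. The sketch below illustrates one way to assign such batch numbers; it is a simplified stand-in for the described idea, not the method's actual code, and the index labels are made up.

```python
import pandas as pd


def assign_batches(component_blocks: pd.DataFrame, block_batch_size: int) -> pd.Series:
    # Walk through the performance groups in order and open a new
    # batch whenever adding the next group would exceed the limit.
    batch_ids = []
    batch, used = 1, 0
    for blocks in component_blocks['Blocks']:
        if used + blocks > block_batch_size and used > 0:
            batch += 1
            used = 0
        used += blocks
        batch_ids.append(batch)
    return pd.Series(batch_ids, index=component_blocks.index, name='Batch')


blocks = pd.DataFrame(
    {'Blocks': [2, 3, 4, 1]},
    index=['cmp.A-1-1', 'cmp.A-1-2', 'cmp.B-1-1', 'cmp.B-2-1'],
)
print(assign_batches(blocks, block_batch_size=5))  # batches: 1, 1, 2, 2
```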
@@ -641,7 +641,7 @@ def probabilities(self):
Returns
-------
pd.DataFrame
- Dataframe with the probability of each damage state for
+ DataFrame with the probability of each damage state for
each component block.
"""
@@ -926,7 +926,7 @@ def _evaluate_damage_state(
if self._asmnt.log.verbose:
self.log.msg('Evaluating damage states...', prepend_timestamp=True)

- # Create an empty dataframe with columns and index taken from
+ # Create an empty DataFrame with columns and index taken from
# the input capacity sample
dmg_eval = pd.DataFrame(
columns=capacity_sample.columns, index=capacity_sample.index
@@ -946,25 +946,25 @@
[dmg_eval.loc[:1, PG_i] for PG_i in PG_list], axis=1, keys=PG_list
).columns
PG_cols.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
- # Create a dataframe with demand values repeated for the
+ # Create a DataFrame with demand values repeated for the
# number of PGs and assign the columns as PG_cols
demand_df.append(
pd.concat(
[pd.Series(demand_vals)] * len(PG_cols), axis=1, keys=PG_cols
)
)

- # Concatenate all demand dataframes into a single dataframe
+ # Concatenate all demand DataFrames into a single DataFrame
demand_df = pd.concat(demand_df, axis=1)
- # Sort the columns of the demand dataframe
+ # Sort the columns of the demand DataFrame
demand_df.sort_index(axis=1, inplace=True)

# Evaluate the damage exceedance by subtracting demand from
# capacity and checking if the result is less than zero
dmg_eval = (capacity_sample - demand_df) < 0

# Remove any columns with NaN values from the damage
- # exceedance dataframe
+ # exceedance DataFrame
dmg_eval.dropna(axis=1, inplace=True)

# initialize the DataFrames that store the damage states and
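The comments above describe the core of the damage evaluation: broadcast each demand realization across the capacity columns, subtract, and flag negative results as exceedance. A toy example of that comparison, with made-up component names and values, is:

```python
import numpy as np
import pandas as pd

# Two limit-state capacity columns for one component, three realizations.
capacity_sample = pd.DataFrame(
    {('cmp.A', 'LS1'): [0.4, 0.6, 0.8], ('cmp.A', 'LS2'): [0.9, 1.1, 1.3]}
)
demand_vals = np.array([0.5, 0.5, 1.2])

# Repeat the demand for every capacity column, then flag exceedance
# wherever capacity minus demand is negative (demand > capacity).
demand_df = pd.DataFrame(
    {col: demand_vals for col in capacity_sample.columns},
    index=capacity_sample.index,
)
dmg_eval = (capacity_sample - demand_df) < 0
print(dmg_eval)
```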
@@ -1018,7 +1018,7 @@ def _create_dmg_RVs(self, PGB, scaling_specification=None):
The method initializes two random variable registries,
capacity_RV_reg and lsds_RV_reg, and loops through each
performance group in the input performance group batch (PGB)
- dataframe. For each performance group, it retrieves the
+ DataFrame. For each performance group, it retrieves the
component sample and blocks and checks if the limit state is
defined for the component. If the limit state is defined, the
method gets the list of limit states and the parameters for
@@ -1178,7 +1178,7 @@ def map_ds(values, offset=int(ds_id + 1)):
css = 'capacity adjustment specification'
if not isinstance(value, str):
raise ValueError(
- f'Invalud entry in {css}: {value}. It has to be a string. '
+ f'Invalid entry in {css}: {value}. It has to be a string. '
f'See docstring of DamageModel._create_dmg_RVs.'
)
capacity_adjustment_operation = value[0]
@@ -1504,7 +1504,7 @@ def _perform_dmg_task(self, task):
match_locations = False

# check if the source component exists in the damage state
- # dataframe
+ # DataFrame
if source_cmp not in self.ds_sample.columns.get_level_values('cmp'):
self.log.warn(
f"Source component `{source_cmp}` in the prescribed "
@@ -1639,7 +1639,7 @@ def _perform_dmg_event_loc(self, source_cmp, ds_source, target_cmp, ds_target):

def _complete_ds_cols(self, dmg_sample):
"""
- Completes the damage sample dataframe with all possible damage
+ Completes the damage sample DataFrame with all possible damage
states for each component.
Parameters
@@ -1682,7 +1682,7 @@ def _complete_ds_cols(self, dmg_sample):
# get the number of possible limit states
ls_list = [col for col in DP.columns.unique(level=0) if 'LS' in col]

- # initialize the result dataframe
+ # initialize the result DataFrame
res = pd.DataFrame()

# TODO: For the code below, store the number of damage states
@@ -1720,7 +1720,7 @@ def _complete_ds_cols(self, dmg_sample):
],
]

- # Create a dataframe where they are repeated ds_count times in the
+ # Create a DataFrame where they are repeated ds_count times in the
# columns. The keys put the DS id in the first level of the
# multiindexed column
cmp_headers = pd.concat(
@@ -1730,10 +1730,10 @@
)
cmp_headers.columns.names = ['ds', *cmp_headers.columns.names[1::]]

- # add these new columns to the result dataframe
+ # add these new columns to the result DataFrame
res = pd.concat([res, cmp_headers], axis=1)

- # Fill the result dataframe with zeros and reorder its columns to have
+ # Fill the result DataFrame with zeros and reorder its columns to have
# the damage states at the lowest like - matching the dmg_sample input
res = pd.DataFrame(
0.0,
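The comments in this hunk describe repeating the component headers once per damage state, adding the damage state id as the leading column level, and filling the completed columns with zeros. A small self-contained sketch of that column construction, using made-up labels rather than pelicun's code, is:

```python
import pandas as pd

# One component header with (cmp, loc, dir, uid) column levels.
cmp_headers = pd.DataFrame(
    index=[0],
    columns=pd.MultiIndex.from_tuples(
        [('cmp.A', '1', '1', '0')], names=['cmp', 'loc', 'dir', 'uid']
    ),
)

ds_count = 3

# Repeat the header ds_count times; the concat keys become the
# leading 'ds' level of the column MultiIndex.
repeated = pd.concat(
    [cmp_headers] * ds_count, axis=1, keys=[str(i + 1) for i in range(ds_count)]
)
repeated.columns.names = ['ds', *cmp_headers.columns.names]

# Zero-filled result carrying the completed set of damage state columns.
res = pd.DataFrame(0.0, index=repeated.index, columns=repeated.columns)
print(res.columns.tolist())
```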
6 changes: 3 additions & 3 deletions pelicun/model/demand_model.py
@@ -404,7 +404,7 @@ def calibrate_model(self, config):
Parameters
----------
config: dict
- A dictionary, typically read from a json file, that specifies the
+ A dictionary, typically read from a JSON file, that specifies the
distribution family, truncation and censoring limits, and other
settings for the calibration.
@@ -921,8 +921,8 @@ def turn_to_tuples(demand_cloning):

demand_cloning = turn_to_tuples(demand_cloning)

- # The demand cloning confuguration should not include
- # columns that are not present in the orignal sample.
+ # The demand cloning configuration should not include
+ # columns that are not present in the original sample.
warn_columns = []
for column in demand_cloning:
if column not in self.sample.columns:
12 changes: 6 additions & 6 deletions pelicun/model/loss_model.py
@@ -416,7 +416,7 @@ def apply_consequence_scaling(self, scaling_conditions, scaling_factor):
Applies a scaling factor to selected columns of the loss
samples.
- The scaling conditiones are passed as a dictionary mapping
+ The scaling conditions are passed as a dictionary mapping
level names with their required value for the condition to be
met. It has to contain `dv` as one of its keys, defining the
decision variable where the factors should be applied. Other
@@ -464,7 +464,7 @@ def apply_consequence_scaling(self, scaling_conditions, scaling_factor):
if model.sample is None:
continue

- # ensure the levels exist (but don't check if speicfied
+ # ensure the levels exist (but don't check if specified
# values exist yet)
for name in scaling_conditions:
if name not in model.sample.columns.names:
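Based on the docstring excerpt above, a call to `apply_consequence_scaling` might look like the following. The level names and values are hypothetical and only illustrate the dictionary-of-conditions interface; `loss_model` is assumed to be an existing loss model instance.

```python
# Hypothetical usage sketch: scale the 'Cost' decision variable for
# components at location '1' by 20%. The condition values are made up.
scaling_conditions = {'dv': 'Cost', 'loc': '1'}
loss_model.apply_consequence_scaling(
    scaling_conditions=scaling_conditions,
    scaling_factor=1.20,
)
```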
@@ -673,7 +673,7 @@ def _loss_map(self):
The loss map.
"""
- # Retrieve the dataframe from one of the included loss models.
+ # Retrieve the DataFrame from one of the included loss models.
# We use a single loss map for all.
return self.ds_model._loss_map

@@ -688,7 +688,7 @@ def _loss_map(self, loss_map):
The loss map.
"""
- # Add the dataframe to the included loss models.
+ # Add the DataFrame to the included loss models.
# We use a single loss map for all.
for model in self._loss_models:
model._loss_map = loss_map
@@ -781,7 +781,7 @@ def __init__(self, assessment: Assessment):

def _load_model_parameters(self, data):
"""
- Load model parameters from a dataframe, extending those
+ Load model parameters from a DataFrame, extending those
already available. Parameters already defined take precedence,
i.e. redefinitions of parameters are ignored.
@@ -1481,7 +1481,7 @@ def _calculate(
loss_map = self._loss_map['Repair'].to_dict()
sample_size = len(demand_sample)

- # TODO: this can be taken out and simly passed as blocks in
+ # TODO: this can be taken out and simply passed as blocks in
# the arguments, and cast to a dict in here. Index can be
# obtained from there.
index = [
2 changes: 1 addition & 1 deletion pelicun/resources/auto/Hazus_Earthquake_IM.py
@@ -214,7 +214,7 @@ def convertTunnelToHAZUSclass(AIM):
elif ("Cut" in AIM["ConstructType"]) or ("Cover" in AIM["ConstructType"]):
return "HTU2"
else:
- # Select HTU2 for unclassfied tunnels because it is more conservative.
+ # Select HTU2 for unclassified tunnels because it is more conservative.
return "HTU2"


6 changes: 3 additions & 3 deletions pelicun/tests/model/test_damage_model.py
@@ -134,17 +134,17 @@ def test_load_model_parameters(self, damage_model):
assert damage_model.ds_model.damage_params.empty

def test_calculate(self):
- # User-facing methods are coupled with other assessment objets
+ # User-facing methods are coupled with other assessment objects
# and are tested in the verification examples.
pass

def test_save_sample(self):
- # User-facing methods are coupled with other assessment objets
+ # User-facing methods are coupled with other assessment objects
# and are tested in the verification examples.
pass

def test_load_sample(self):
- # User-facing methods are coupled with other assessment objets
+ # User-facing methods are coupled with other assessment objects
# and are tested in the verification examples.
pass

2 changes: 1 addition & 1 deletion pelicun/tests/model/test_loss_model.py
@@ -802,7 +802,7 @@ def test__calc_median_consequence(self, assessment_instance):
performance_group, loss_map, required_edps, demand_dict, cmp_sample
)
assert (
- 'Loss function intepolation for consequence '
+ 'Loss function interpolation for consequence '
'`cmp.A-dv.A` has failed. Ensure a sufficient '
'interpolation domain for the X values '
'(those after the `|` symbol) and verify '
2 changes: 1 addition & 1 deletion pelicun/tests/model/test_model.py
@@ -39,7 +39,7 @@
# John Vouvakis Manousakis

"""
- This file defines a clas used by the model unit tests.
+ This file defines a class used by the model unit tests.
"""

from __future__ import annotations
2 changes: 1 addition & 1 deletion pelicun/tests/test_assessment.py
@@ -96,7 +96,7 @@ def test_assessment_get_default_metadata():

for data_source in data_sources:
# here we just test that we can load the data file, without
- # checking the contens.
+ # checking the contents.
asmt.get_default_data(data_source)
asmt.get_default_metadata(data_source)

4 changes: 2 additions & 2 deletions pelicun/tests/test_base.py
@@ -358,7 +358,7 @@ def test_convert_dtypes():
check_column_type=False,
)

- # Empty dataframe
+ # Empty DataFrame

df_input = pd.DataFrame({})
df_expected = pd.DataFrame({})
@@ -626,7 +626,7 @@ def test_describe():
)

# case 1:
- # passing a dataframe
+ # passing a DataFrame

df = pd.DataFrame(
((1.00, 2.00, 3.00), (4.00, 5.00, 6.00)), columns=['A', 'B', 'C']