diff --git a/build.py b/build.py
index f551efe9c..797f7c772 100644
--- a/build.py
+++ b/build.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# ruff: noqa: EXE001, D100
from bincrafters import build_template_default
diff --git a/conanfile.py b/conanfile.py
index c91ba1164..8ea9a94c9 100644
--- a/conanfile.py
+++ b/conanfile.py
@@ -1,30 +1,30 @@
-import os
+import os # noqa: D100
from conans import CMake, ConanFile
-class simCenterBackendApps(ConanFile):
+class simCenterBackendApps(ConanFile): # noqa: D101
name = 'SimCenterBackendApplications'
version = '1.2.2'
description = 'Backend applications for SimCenter software'
license = 'BSD 3-Clause'
author = 'Michael Gardner mhgardner@berkeley.edu'
url = 'https://github.com/NHERI-SimCenter/SimCenterBackendApplications'
- settings = {
+ settings = { # noqa: RUF012
'os': None,
'build_type': None,
'compiler': None,
'arch': ['x86_64', 'armv8'],
}
- options = {'shared': [True, False]}
- default_options = {
+ options = {'shared': [True, False]} # noqa: RUF012
+ default_options = { # noqa: RUF012
'mkl-static:threaded': False,
'ipp-static:simcenter_backend': True,
'libcurl:with_ssl': 'openssl',
}
generators = 'cmake'
build_policy = 'missing'
- requires = [
+ requires = [ # noqa: RUF012
'jansson/2.13.1',
'zlib/1.2.11',
'libcurl/8.1.1',
@@ -40,30 +40,30 @@ class simCenterBackendApps(ConanFile):
_build_subfolder = 'build_subfolder'
# Set short paths for Windows
short_paths = True
- scm = {
+ scm = { # noqa: RUF012
'type': 'git', # Use "type": "svn", if local repo is managed using SVN
'subfolder': _source_subfolder,
'url': 'auto',
'revision': 'auto',
}
- def configure(self):
+ def configure(self): # noqa: D102
self.options.shared = False
- def configure_cmake(self):
+ def configure_cmake(self): # noqa: D102
cmake = CMake(self)
cmake.configure(source_folder=self._source_subfolder)
return cmake
- def build(self):
+ def build(self): # noqa: D102
cmake = self.configure_cmake()
cmake.build()
- def package(self):
+ def package(self): # noqa: D102
self.copy(pattern='LICENSE', dst='licenses', src=self._source_subfolder)
cmake = self.configure_cmake()
cmake.install()
self.copy('*', dst='bin', src=self._source_subfolder + '/applications')
- def package_info(self):
- self.env_info.PATH.append(os.path.join(self.package_folder, 'bin'))
+ def package_info(self): # noqa: D102
+ self.env_info.PATH.append(os.path.join(self.package_folder, 'bin')) # noqa: PTH118
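PTH118 above flags os.path.join in favor of pathlib. A minimal sketch of the pathlib spelling this suppression defers (illustrative only; bin_dir is a hypothetical helper, not part of this diff):

from pathlib import Path

def bin_dir(package_folder: str) -> str:
    # pathlib equivalent of os.path.join(package_folder, 'bin') (PTH118)
    return str(Path(package_folder) / 'bin')

print(bin_dir('/opt/pkg'))  # /opt/pkg/bin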
diff --git a/modules/Workflow/AggregateResults.py b/modules/Workflow/AggregateResults.py
index a8c592b4b..cc4920c9c 100644
--- a/modules/Workflow/AggregateResults.py
+++ b/modules/Workflow/AggregateResults.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,14 +46,14 @@
import pandas as pd
-def log_msg(msg):
- print(
- '{} {}'.format(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S:%fZ')[:-4], msg)
+def log_msg(msg): # noqa: D103
+ print( # noqa: T201
+ '{} {}'.format(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S:%fZ')[:-4], msg) # noqa: DTZ003
)
-def main(threads=1):
- headers = dict(
+def main(threads=1): # noqa: C901, D103
+ headers = dict( # noqa: C408
IM=[0, 1, 2, 3],
BIM=[
0,
@@ -85,13 +85,13 @@ def read_csv_np(file, header):
data[data == ''] = np.nan
tuples = [tuple(h) for h in res[:first_row].T[1:]]
- MI = pd.MultiIndex.from_tuples(tuples, names=res[:first_row].T[0])
+ MI = pd.MultiIndex.from_tuples(tuples, names=res[:first_row].T[0]) # noqa: N806
- df = pd.DataFrame(
+ df = pd.DataFrame( # noqa: PD901
data, columns=MI, index=res[first_row:].T[0], dtype=float
)
- return df
+ return df # noqa: RET504
@delayed
def read_csv_files_np(file_list, header):
@@ -106,7 +106,7 @@ def read_csv_files_np(file_list, header):
for res_type in ['IM', 'BIM', 'EDP', 'DM', 'DV']:
log_msg(f'Loading {res_type} files...')
- files = glob.glob(f'./results/{res_type}/*/{res_type}_*.csv')
+ files = glob.glob(f'./results/{res_type}/*/{res_type}_*.csv') # noqa: PTH207
# files = files[:1000]
if len(files) > 0:
@@ -115,7 +115,7 @@ def read_csv_files_np(file_list, header):
chunk = math.ceil(file_count / threads)
df_list = []
- print(f'Creating threads for {file_count} files...')
+ print(f'Creating threads for {file_count} files...') # noqa: T201
for t_i in range(threads):
# print(t_i)
@@ -152,7 +152,7 @@ def read_csv_files_np(file_list, header):
log_msg('Concatenating all files')
df_all = pd.concat(df_list, axis=0, sort=False)
- df_all.sort_index(axis=0, inplace=True)
+ df_all.sort_index(axis=0, inplace=True) # noqa: PD002
# save the results
log_msg('Saving results')
@@ -168,7 +168,7 @@ def read_csv_files_np(file_list, header):
# df_all.to_csv('{}.csv'.format(res_type))
else:
- print(f'No {res_type} files found')
+ print(f'No {res_type} files found') # noqa: T201
if use_dask:
log_msg('Closing cluster...')
@@ -178,7 +178,7 @@ def read_csv_files_np(file_list, header):
# aggregate the realizations files
log_msg('Aggregating individual realizations...')
- files = glob.glob(
+ files = glob.glob( # noqa: PTH207
'./results/{}/*/{}_*.hdf'.format('realizations', 'realizations')
)
@@ -199,7 +199,7 @@ def read_csv_files_np(file_list, header):
df_all.index = df_all.index.astype(np.int32)
- df_all.sort_index(axis=0, inplace=True)
+ df_all.sort_index(axis=0, inplace=True) # noqa: PD002
try:
df_all.astype(np.float32).to_hdf(
@@ -210,7 +210,7 @@ def read_csv_files_np(file_list, header):
complevel=1,
complib='blosc:blosclz',
)
- except:
+ except: # noqa: E722
df_all.to_hdf(
'realizations.hdf',
key,
@@ -228,7 +228,7 @@ def read_csv_files_np(file_list, header):
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser('Aggregate the results from rWHALE.')
+ workflowArgParser = argparse.ArgumentParser('Aggregate the results from rWHALE.') # noqa: N816
workflowArgParser.add_argument(
'-threads',
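DTZ003 above marks the naive datetime.utcnow() call in log_msg. A timezone-aware variant that would need no suppression, keeping the same truncated-microseconds format (a sketch, not part of the diff):

from datetime import datetime, timezone

def log_msg(msg):
    # datetime.now(timezone.utc) is the DTZ003-clean replacement for utcnow()
    stamp = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S:%fZ')[:-4]
    print(f'{stamp} {msg}')

log_msg('Loading IM files...')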
diff --git a/modules/Workflow/CreateWorkflowJobs.py b/modules/Workflow/CreateWorkflowJobs.py
index 4e7f30d11..636f71fb2 100644
--- a/modules/Workflow/CreateWorkflowJobs.py
+++ b/modules/Workflow/CreateWorkflowJobs.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,22 +46,22 @@
import numpy as np
-def generate_workflow_tasks(
+def generate_workflow_tasks( # noqa: C901, D103
bldg_filter,
config_file,
out_dir,
task_size,
- rWHALE_dir,
+ rWHALE_dir, # noqa: N803
):
- jobId = os.getenv('SLURM_JOB_ID') # We might need this later
+ jobId = os.getenv('SLURM_JOB_ID') # We might need this later # noqa: N806, F841
# get the type of outputs requested
- with open(f'{rWHALE_dir}/{config_file}') as f:
+ with open(f'{rWHALE_dir}/{config_file}') as f: # noqa: PTH123
settings = json.load(f)
output_types = [
out_type
for out_type, val in settings['outputs'].items()
- if val == True
+ if val == True # noqa: E712
]
# KZ@220324: check if regional site response is requested
@@ -82,8 +82,8 @@ def generate_workflow_tasks(
)
if bldg_filter == '':
- raise ValueError(
- 'Running a regional simulation on DesignSafe requires either '
+ raise ValueError( # noqa: TRY003
+ 'Running a regional simulation on DesignSafe requires either ' # noqa: EM101
"the 'buildingFilter' parameter to be set for the workflow "
"application or the 'filter' parameter set for the Building "
'application in the workflow configuration file. Neither was '
@@ -104,9 +104,9 @@ def generate_workflow_tasks(
count = len(bldgs_requested)
- tasksCount = int(math.ceil(count / task_size))
+ tasksCount = int(math.ceil(count / task_size)) # noqa: N806
- workflowScript = f'/tmp/{rWHALE_dir}/applications/Workflow/rWHALE.py'
+ workflowScript = f'/tmp/{rWHALE_dir}/applications/Workflow/rWHALE.py' # noqa: S108, N806
subfolder = 0
for i in range(tasksCount):
@@ -114,8 +114,8 @@ def generate_workflow_tasks(
# do not try to run sims if there are no bldgs to run
if len(bldg_list) > 0:
- min_ID = bldg_list[0]
- max_ID = bldg_list[-1]
+ min_ID = bldg_list[0] # noqa: N806
+ max_ID = bldg_list[-1] # noqa: N806
max_ids = np.where(np.diff(bldg_list) != 1)[0]
max_ids = np.append(
@@ -128,19 +128,19 @@ def generate_workflow_tasks(
min_ids = np.zeros(max_ids.shape, dtype=int)
min_ids[1:] = max_ids[:-1] + 1
- filter = ''
+ filter = '' # noqa: A001
for i_min, i_max in zip(min_ids, max_ids):
if i_min == i_max:
- filter += f',{bldg_list[i_min]}'
+ filter += f',{bldg_list[i_min]}' # noqa: A001
else:
- filter += f',{bldg_list[i_min]}-{bldg_list[i_max]}'
- filter = filter[1:] # to remove the initial comma
+ filter += f',{bldg_list[i_min]}-{bldg_list[i_max]}' # noqa: A001
+ filter = filter[1:] # to remove the initial comma # noqa: A001
if (i % 500) == 0:
subfolder = subfolder + 1
run_dir = (
- f'/tmp/{rWHALE_dir}'
+ f'/tmp/{rWHALE_dir}' # noqa: S108
f'/applications/Workflow/RunDir{min_ID}-{max_ID}'
)
@@ -156,7 +156,7 @@ def generate_workflow_tasks(
# run the simulation
task_list += (
f'python3 {workflowScript} '
- f'/tmp/{rWHALE_dir}/{config_file} '
+ f'/tmp/{rWHALE_dir}/{config_file} ' # noqa: S108
f'-d /tmp/{rWHALE_dir}/input_data '
f'-w {run_dir} -l {log_path} '
f'--filter {filter} '
@@ -188,26 +188,26 @@ def generate_workflow_tasks(
task_list += f'rm -rf {run_dir} \n'
# write the tasks to the output file
- with open('WorkflowJobs.txt', 'a+') as tasksFile:
+ with open('WorkflowJobs.txt', 'a+') as tasksFile: # noqa: PTH123, N806
tasksFile.write(task_list)
-def generate_workflow_tasks_siteresponse(
+def generate_workflow_tasks_siteresponse( # noqa: D103
bldg_filter,
config_file,
out_dir,
task_size,
- rWHALE_dir,
+ rWHALE_dir, # noqa: N803
):
- jobId = os.getenv('SLURM_JOB_ID') # We might need this later
+ jobId = os.getenv('SLURM_JOB_ID') # We might need this later # noqa: N806, F841
# get the type of outputs requested
- with open(f'{rWHALE_dir}/{config_file}') as f:
+ with open(f'{rWHALE_dir}/{config_file}') as f: # noqa: PTH123
settings = json.load(f)
- output_types = [
+ output_types = [ # noqa: F841
out_type
for out_type, val in settings['outputs'].items()
- if val == True
+ if val == True # noqa: E712
]
# get the list of buildings requested to run
@@ -218,8 +218,8 @@ def generate_workflow_tasks_siteresponse(
)
if bldg_filter == '':
- raise ValueError(
- 'Running a regional simulation on DesignSafe requires either '
+ raise ValueError( # noqa: TRY003
+ 'Running a regional simulation on DesignSafe requires either ' # noqa: EM101
"the 'buildingFilter' parameter to be set for the workflow "
"application or the 'filter' parameter set for the Building "
'application in the workflow configuration file. Neither was '
@@ -240,12 +240,12 @@ def generate_workflow_tasks_siteresponse(
count = len(bldgs_requested)
- tasksCount = int(math.ceil(count / task_size))
+ tasksCount = int(math.ceil(count / task_size)) # noqa: N806
- print(f'tasksCount = {tasksCount}')
+ print(f'tasksCount = {tasksCount}') # noqa: T201
- workflowScript = (
- f'/tmp/{rWHALE_dir}/applications/Workflow/SiteResponse_workflow.py'
+ workflowScript = ( # noqa: N806
+ f'/tmp/{rWHALE_dir}/applications/Workflow/SiteResponse_workflow.py' # noqa: S108
)
subfolder = 0
@@ -254,8 +254,8 @@ def generate_workflow_tasks_siteresponse(
# do not try to run sims if there are no bldgs to run
if len(bldg_list) > 0:
- min_ID = bldg_list[0]
- max_ID = bldg_list[-1]
+ min_ID = bldg_list[0] # noqa: N806
+ max_ID = bldg_list[-1] # noqa: N806
max_ids = np.where(np.diff(bldg_list) != 1)[0]
max_ids = np.append(
@@ -268,19 +268,19 @@ def generate_workflow_tasks_siteresponse(
min_ids = np.zeros(max_ids.shape, dtype=int)
min_ids[1:] = max_ids[:-1] + 1
- filter = ''
+ filter = '' # noqa: A001
for i_min, i_max in zip(min_ids, max_ids):
if i_min == i_max:
- filter += f',{bldg_list[i_min]}'
+ filter += f',{bldg_list[i_min]}' # noqa: A001
else:
- filter += f',{bldg_list[i_min]}-{bldg_list[i_max]}'
- filter = filter[1:] # to remove the initial comma
+ filter += f',{bldg_list[i_min]}-{bldg_list[i_max]}' # noqa: A001
+ filter = filter[1:] # to remove the initial comma # noqa: A001
if (i % 500) == 0:
subfolder = subfolder + 1
run_dir = (
- f'/tmp/{rWHALE_dir}'
+ f'/tmp/{rWHALE_dir}' # noqa: S108
f'/applications/Workflow/RunDir{min_ID}-{max_ID}'
)
@@ -296,7 +296,7 @@ def generate_workflow_tasks_siteresponse(
# run the simulation
task_list += (
f'python3 {workflowScript} '
- f'/tmp/{rWHALE_dir}/{config_file} '
+ f'/tmp/{rWHALE_dir}/{config_file} ' # noqa: S108
f'-d /tmp/{rWHALE_dir}/input_data '
f'-w {run_dir} -l {log_path} '
f'--filter {filter} && '
@@ -315,29 +315,29 @@ def generate_workflow_tasks_siteresponse(
task_list += "echo 'cmd generated. Currend dir: '$PWD \n"
# write the tasks to the output file
- with open('WorkflowJobs_siteResponse.txt', 'a+') as tasksFile:
+ with open('WorkflowJobs_siteResponse.txt', 'a+') as tasksFile: # noqa: PTH123, N806
tasksFile.write(task_list)
-def generate_workflow_tasks_regionalsiteresponse(
+def generate_workflow_tasks_regionalsiteresponse( # noqa: C901, D103
site_filter,
config_file,
out_dir,
task_size,
- rWHALE_dir,
+ rWHALE_dir, # noqa: N803
):
- jobId = os.getenv('SLURM_JOB_ID') # We might need this later
+ jobId = os.getenv('SLURM_JOB_ID') # We might need this later # noqa: N806, F841
# KZ@220324: currently only EDP is valid output as it's just soil column response in this step
output_valid = ['IM']
# get the type of outputs requested
- with open(f'{rWHALE_dir}/{config_file}') as f:
+ with open(f'{rWHALE_dir}/{config_file}') as f: # noqa: PTH123
settings = json.load(f)
output_types = [
out_type
for out_type, val in settings['outputs'].items()
- if (val == True and out_type in output_valid)
+ if (val == True and out_type in output_valid) # noqa: E712
]
# get the list of sites requested to run
@@ -348,8 +348,8 @@ def generate_workflow_tasks_regionalsiteresponse(
].get('filter', '')
if site_filter == '':
- raise ValueError(
- 'Running a regional simulation on DesignSafe requires either '
+ raise ValueError( # noqa: TRY003
+ 'Running a regional simulation on DesignSafe requires either ' # noqa: EM101
"the 'buildingFilter' parameter to be set for the workflow "
"application or the 'filter' parameter set for the Building "
'application in the workflow configuration file. Neither was '
@@ -370,9 +370,9 @@ def generate_workflow_tasks_regionalsiteresponse(
count = len(sites_requested)
- tasksCount = int(math.ceil(count / task_size))
+ tasksCount = int(math.ceil(count / task_size)) # noqa: N806
- workflowScript = f'/tmp/{rWHALE_dir}/applications/Workflow/siteResponseWHALE.py'
+ workflowScript = f'/tmp/{rWHALE_dir}/applications/Workflow/siteResponseWHALE.py' # noqa: S108, N806
subfolder = 0
for i in range(tasksCount):
@@ -380,8 +380,8 @@ def generate_workflow_tasks_regionalsiteresponse(
# do not try to run sims if there are no bldgs to run
if len(site_list) > 0:
- min_ID = site_list[0]
- max_ID = site_list[-1]
+ min_ID = site_list[0] # noqa: N806
+ max_ID = site_list[-1] # noqa: N806
max_ids = np.where(np.diff(site_list) != 1)[0]
max_ids = np.append(
@@ -394,19 +394,19 @@ def generate_workflow_tasks_regionalsiteresponse(
min_ids = np.zeros(max_ids.shape, dtype=int)
min_ids[1:] = max_ids[:-1] + 1
- filter = ''
+ filter = '' # noqa: A001
for i_min, i_max in zip(min_ids, max_ids):
if i_min == i_max:
- filter += f',{site_list[i_min]}'
+ filter += f',{site_list[i_min]}' # noqa: A001
else:
- filter += f',{site_list[i_min]}-{site_list[i_max]}'
- filter = filter[1:] # to remove the initial comma
+ filter += f',{site_list[i_min]}-{site_list[i_max]}' # noqa: A001
+ filter = filter[1:] # to remove the initial comma # noqa: A001
if (i % 500) == 0:
subfolder = subfolder + 1
run_dir = (
- f'/tmp/{rWHALE_dir}'
+ f'/tmp/{rWHALE_dir}' # noqa: S108
f'/applications/Workflow/RunDirSite{min_ID}-{max_ID}'
)
@@ -455,14 +455,14 @@ def generate_workflow_tasks_regionalsiteresponse(
task_list += f'rm -rf {run_dir} \n'
# write the tasks to the output file
- with open('WorkflowJobs_SiteResponse.txt', 'a+') as tasksFile:
+ with open('WorkflowJobs_SiteResponse.txt', 'a+') as tasksFile: # noqa: PTH123, N806
tasksFile.write(task_list)
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Create the workflow tasks for rWHALE.'
)
@@ -530,4 +530,4 @@ def generate_workflow_tasks_regionalsiteresponse(
)
else:
# currently supporting building and siteresponse
- print('-workflowName has to be building or siteResponse')
+ print('-workflowName has to be building or siteResponse') # noqa: T201
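The repeated A001 suppressions in this file all come from a local named filter shadowing the builtin; a rename removes them. A sketch of the same range-compression logic under that rename, assuming the np.append call elided by the truncated hunk above closes the final run with the last index:

import numpy as np

def build_filter(bldg_list):
    # identical logic to generate_workflow_tasks, with 'filter' renamed
    # to filter_expr so no A001 suppression is needed
    max_ids = np.where(np.diff(bldg_list) != 1)[0]
    max_ids = np.append(max_ids, len(bldg_list) - 1)  # assumed completion
    min_ids = np.zeros(max_ids.shape, dtype=int)
    min_ids[1:] = max_ids[:-1] + 1
    filter_expr = ''
    for i_min, i_max in zip(min_ids, max_ids):
        if i_min == i_max:
            filter_expr += f',{bldg_list[i_min]}'
        else:
            filter_expr += f',{bldg_list[i_min]}-{bldg_list[i_max]}'
    return filter_expr[1:]  # drop the leading comma

print(build_filter([1, 2, 3, 7, 9, 10]))  # 1-3,7,9-10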
diff --git a/modules/Workflow/EE-UQ workflow.py b/modules/Workflow/EE-UQ workflow.py
index 32d48c3f2..5fc01a120 100644
--- a/modules/Workflow/EE-UQ workflow.py
+++ b/modules/Workflow/EE-UQ workflow.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -46,25 +46,25 @@
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(run_type, input_file, app_registry):
+def main(run_type, input_file, app_registry): # noqa: D103
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
whale.log_file = runDir + '/log.txt'
- with open(whale.log_file, 'w') as f:
+ with open(whale.log_file, 'w') as f: # noqa: PTH123
f.write('EE-UQ workflow\n')
# echo the inputs
@@ -72,7 +72,7 @@ def main(run_type, input_file, app_registry):
log_msg('Started running the workflow script')
log_msg(log_div)
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -93,13 +93,13 @@ def main(run_type, input_file, app_registry):
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
main(run_type=sys.argv[1], input_file=sys.argv[2], app_registry=sys.argv[3])
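PTH123 above flags the builtin open() calls in main(). The pathlib form that would satisfy the rule (a sketch; read_inputs is a hypothetical helper):

import json
from pathlib import Path

def read_inputs(input_file):
    # Path.open is the PTH123-clean spelling of open(input_file)
    with Path(input_file).open() as f:
        return json.load(f)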
diff --git a/modules/Workflow/EE-UQ.py b/modules/Workflow/EE-UQ.py
index 8b26f676a..0387884d7 100755
--- a/modules/Workflow/EE-UQ.py
+++ b/modules/Workflow/EE-UQ.py
@@ -1,11 +1,11 @@
-# written: fmk, adamzs
+# written: fmk, adamzs # noqa: EXE002, INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -17,78 +17,78 @@
divider = '#' * 80
log_output = []
-from WorkflowUtils import *
+from WorkflowUtils import * # noqa: E402, F403
-def main(run_type, inputFile, applicationsRegistry):
+def main(run_type, inputFile, applicationsRegistry): # noqa: C901, N803, D103, PLR0912, PLR0915
# the whole workflow is wrapped within a 'try' block.
# a number of exceptions (files missing, explicit application failures, etc.) are
# handled explicitly to aid the user.
# But unhandled exceptions cause the workflow to stop with an error, handled in the
# exception block at the very bottom of this main() function
try:
- workflow_log(divider)
- workflow_log('Start of run')
- workflow_log(divider)
- workflow_log('workflow input file: %s' % inputFile)
- workflow_log('application registry file: %s' % applicationsRegistry)
- workflow_log('runtype: %s' % run_type)
- workflow_log(divider)
+ workflow_log(divider) # noqa: F405
+ workflow_log('Start of run') # noqa: F405
+ workflow_log(divider) # noqa: F405
+ workflow_log('workflow input file: %s' % inputFile) # noqa: F405, UP031
+ workflow_log('application registry file: %s' % applicationsRegistry) # noqa: F405, UP031
+ workflow_log('runtype: %s' % run_type) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
#
# first we parse the applications registry to load all possible applications
# - for each application type we place in a dictionary key being name, value containing path to executable
#
- with open(applicationsRegistry) as data_file:
- registryData = json.load(data_file)
+ with open(applicationsRegistry) as data_file: # noqa: PTH123
+ registryData = json.load(data_file) # noqa: N806
# convert all relative paths to full paths
- A = 'Applications'
- Applications = dict()
- appList = 'Event Modeling EDP Simulation UQ'.split(' ')
- appList = [a + A for a in appList]
+ A = 'Applications' # noqa: N806
+ Applications = dict() # noqa: C408, N806
+ appList = 'Event Modeling EDP Simulation UQ'.split(' ') # noqa: N806
+ appList = [a + A for a in appList] # noqa: N806
for app_type in appList:
if app_type in registryData:
- xApplicationData = registryData[app_type]
- applicationsData = xApplicationData['Applications']
+ xApplicationData = registryData[app_type] # noqa: N806
+ applicationsData = xApplicationData['Applications'] # noqa: N806
for app in applicationsData:
- appName = app['Name']
- appExe = app['ExecutablePath']
+ appName = app['Name'] # noqa: N806
+ appExe = app['ExecutablePath'] # noqa: N806
if app_type not in Applications:
- Applications[app_type] = dict()
+ Applications[app_type] = dict() # noqa: C408
Applications[app_type][appName] = appExe
#
# open input file, and parse json into data
#
- with open(inputFile) as data_file:
+ with open(inputFile) as data_file: # noqa: PTH123
data = json.load(data_file)
# convert all relative paths to full paths
# relative2fullpath(data)
if 'runDir' in data:
- runDIR = data['runDir']
+ runDIR = data['runDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a runDir Entry')
+ raise WorkFlowInputError('Need a runDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'remoteAppDir' in data:
- remoteAppDir = data['remoteAppDir']
+ remoteAppDir = data['remoteAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a remoteAppDir Entry')
+ raise WorkFlowInputError('Need a remoteAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'localAppDir' in data:
- localAppDir = data['localAppDir']
+ localAppDir = data['localAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a localAppDir Entry')
+ raise WorkFlowInputError('Need a localAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
#
# before running chdir to templatedir
#
- workflow_log('run Directory: %s' % runDIR)
+ workflow_log('run Directory: %s' % runDIR) # noqa: F405, UP031
os.chdir(runDIR)
os.chdir('templatedir')
@@ -100,7 +100,7 @@ def main(run_type, inputFile, applicationsRegistry):
if 'Applications' in data:
available_apps = data['Applications']
else:
- raise WorkFlowInputError('Need an Applications Entry')
+ raise WorkFlowInputError('Need an Applications Entry') # noqa: EM101, F405, TRY003, TRY301
#
# get events, for each the application and its data .. FOR NOW 1 EVENT
@@ -111,175 +111,175 @@ def main(run_type, inputFile, applicationsRegistry):
for event in events:
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if eventClassification == 'Earthquake':
if 'Application' in event:
- eventApplication = event['Application']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['Application'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806, F841
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications['EventApplications'].get(
+ eventAppExe = Applications['EventApplications'].get( # noqa: N806
eventApplication
)
- workflow_log(remoteAppDir)
- workflow_log(eventAppExe)
- eventAppExeLocal = posixpath.join(
+ workflow_log(remoteAppDir) # noqa: F405
+ workflow_log(eventAppExe) # noqa: F405
+ eventAppExeLocal = posixpath.join( # noqa: N806
localAppDir, eventAppExe
)
- eventAppExeRemote = posixpath.join(
+ eventAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, eventAppExe
)
- workflow_log(eventAppExeRemote)
+ workflow_log(eventAppExeRemote) # noqa: F405
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'Event classification must be Earthquake, not %s'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event classification must be Earthquake, not %s' # noqa: UP031
% eventClassification
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an Events Entry in Applications')
+ raise WorkFlowInputError('Need an Events Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get modeling application and its data
#
if 'Modeling' in available_apps:
- modelingApp = available_apps['Modeling']
+ modelingApp = available_apps['Modeling'] # noqa: N806
if 'Application' in modelingApp:
- modelingApplication = modelingApp['Application']
+ modelingApplication = modelingApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- modelingAppData = modelingApp['ApplicationData']
+ modelingAppData = modelingApp['ApplicationData'] # noqa: N806
if (
- modelingApplication
+ modelingApplication # noqa: SIM118
in Applications['ModelingApplications'].keys()
):
- modelingAppExe = Applications['ModelingApplications'].get(
+ modelingAppExe = Applications['ModelingApplications'].get( # noqa: N806
modelingApplication
)
- modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe)
- modelingAppExeRemote = posixpath.join(
+ modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe) # noqa: N806
+ modelingAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, modelingAppExe
)
else:
- raise WorkFlowInputError(
- 'Modeling application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Modeling application %s not in registry' # noqa: UP031
% modelingApplication
)
else:
- raise WorkFlowInputError(
- 'Need a ModelingApplication in Modeling data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a ModelingApplication in Modeling data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Modeling Entry in Applications')
+ raise WorkFlowInputError('Need a Modeling Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get edp application and its data .. CURRENTLY MODELING APP MUST CREATE EDP
#
if 'EDP' in available_apps:
- edpApp = available_apps['EDP']
+ edpApp = available_apps['EDP'] # noqa: N806
if 'Application' in edpApp:
- edpApplication = edpApp['Application']
+ edpApplication = edpApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- edpAppData = edpApp['ApplicationData']
- if edpApplication in Applications['EDPApplications'].keys():
- edpAppExe = Applications['EDPApplications'].get(edpApplication)
- edpAppExeLocal = posixpath.join(localAppDir, edpAppExe)
- edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe)
+ edpAppData = edpApp['ApplicationData'] # noqa: N806
+ if edpApplication in Applications['EDPApplications'].keys(): # noqa: SIM118
+ edpAppExe = Applications['EDPApplications'].get(edpApplication) # noqa: N806
+ edpAppExeLocal = posixpath.join(localAppDir, edpAppExe) # noqa: N806
+ edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'EDP application {edpApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'EDP application {edpApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need an EDPApplication in EDP data')
+ raise WorkFlowInputError('Need an EDPApplication in EDP data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an EDP Entry in Applications')
+ raise WorkFlowInputError('Need an EDP Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get simulation application and its data
#
if 'Simulation' in available_apps:
- simulationApp = available_apps['Simulation']
+ simulationApp = available_apps['Simulation'] # noqa: N806
if 'Application' in simulationApp:
- simulationApplication = simulationApp['Application']
+ simulationApplication = simulationApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- simAppData = simulationApp['ApplicationData']
+ simAppData = simulationApp['ApplicationData'] # noqa: N806
if (
- simulationApplication
+ simulationApplication # noqa: SIM118
in Applications['SimulationApplications'].keys()
):
- simAppExe = Applications['SimulationApplications'].get(
+ simAppExe = Applications['SimulationApplications'].get( # noqa: N806
simulationApplication
)
- simAppExeLocal = posixpath.join(localAppDir, simAppExe)
- simAppExeRemote = posixpath.join(remoteAppDir, simAppExe)
+ simAppExeLocal = posixpath.join(localAppDir, simAppExe) # noqa: N806
+ simAppExeRemote = posixpath.join(remoteAppDir, simAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'Simulation application {simulationApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'Simulation application {simulationApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError(
- 'Need an SimulationApplication in Simulation data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an SimulationApplication in Simulation data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'UQ' in available_apps:
- uqApp = available_apps['UQ']
+ uqApp = available_apps['UQ'] # noqa: N806
if 'Application' in uqApp:
- uqApplication = uqApp['Application']
+ uqApplication = uqApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- uqAppData = uqApp['ApplicationData']
- if uqApplication in Applications['UQApplications'].keys():
- uqAppExe = Applications['UQApplications'].get(uqApplication)
- uqAppExeLocal = posixpath.join(localAppDir, uqAppExe)
- uqAppExeRemote = posixpath.join(localAppDir, uqAppExe)
+ uqAppData = uqApp['ApplicationData'] # noqa: N806
+ if uqApplication in Applications['UQApplications'].keys(): # noqa: SIM118
+ uqAppExe = Applications['UQApplications'].get(uqApplication) # noqa: N806
+ uqAppExeLocal = posixpath.join(localAppDir, uqAppExe) # noqa: N806
+ uqAppExeRemote = posixpath.join(localAppDir, uqAppExe) # noqa: N806, F841
else:
- raise WorkFlowInputError(
- f'UQ application {uqApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'UQ application {uqApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need a UQApplication in UQ data')
+ raise WorkFlowInputError('Need a UQApplication in UQ data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
- workflow_log('SUCCESS: Parsed workflow input')
- workflow_log(divider)
+ workflow_log('SUCCESS: Parsed workflow input') # noqa: F405
+ workflow_log(divider) # noqa: F405
#
# now invoke the applications
@@ -292,18 +292,18 @@ def main(run_type, inputFile, applicationsRegistry):
# - perform Simulation
# - getDL
- bimFILE = 'dakota.json'
- eventFILE = 'EVENT.json'
- samFILE = 'SAM.json'
- edpFILE = 'EDP.json'
- simFILE = 'SIM.json'
- driverFile = 'driver'
+ bimFILE = 'dakota.json' # noqa: N806
+ eventFILE = 'EVENT.json' # noqa: N806
+ samFILE = 'SAM.json' # noqa: N806
+ edpFILE = 'EDP.json' # noqa: N806
+ simFILE = 'SIM.json' # noqa: N806
+ driverFile = 'driver' # noqa: N806
# open driver file & write building app (minus the --getRV) to it
- driverFILE = open(driverFile, 'w')
+ driverFILE = open(driverFile, 'w') # noqa: SIM115, PTH123, N806
# get RV for event
- eventAppDataList = [
+ eventAppDataList = [ # noqa: N806
f'"{eventAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -313,13 +313,13 @@ def main(run_type, inputFile, applicationsRegistry):
if eventAppExe.endswith('.py'):
eventAppDataList.insert(0, 'python')
- for key in eventAppData.keys():
+ for key in eventAppData.keys(): # noqa: SIM118
eventAppDataList.append('--' + key)
value = eventAppData.get(key)
eventAppDataList.append('' + str(value))
for item in eventAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
eventAppDataList.append('--getRV')
@@ -328,11 +328,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
eventAppDataList[0] = '' + eventAppExeLocal
- command, result, returncode = runApplication(eventAppDataList)
+ command, result, returncode = runApplication(eventAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for building model
- modelAppDataList = [
+ modelAppDataList = [ # noqa: N806
f'"{modelingAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -345,12 +345,12 @@ def main(run_type, inputFile, applicationsRegistry):
if modelingAppExe.endswith('.py'):
modelAppDataList.insert(0, 'python')
- for key in modelingAppData.keys():
+ for key in modelingAppData.keys(): # noqa: SIM118
modelAppDataList.append('--' + key)
modelAppDataList.append('' + modelingAppData.get(key))
for item in modelAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
modelAppDataList.append('--getRV')
@@ -360,11 +360,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
modelAppDataList[0] = modelingAppExeLocal
- command, result, returncode = runApplication(modelAppDataList)
+ command, result, returncode = runApplication(modelAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for EDP!
- edpAppDataList = [
+ edpAppDataList = [ # noqa: N806
f'"{edpAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -379,12 +379,12 @@ def main(run_type, inputFile, applicationsRegistry):
if edpAppExe.endswith('.py'):
edpAppDataList.insert(0, 'python')
- for key in edpAppData.keys():
+ for key in edpAppData.keys(): # noqa: SIM118
edpAppDataList.append('--' + key)
edpAppDataList.append('' + edpAppData.get(key))
for item in edpAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
if edpAppExe.endswith('.py'):
@@ -393,11 +393,11 @@ def main(run_type, inputFile, applicationsRegistry):
edpAppDataList[0] = edpAppExeLocal
edpAppDataList.append('--getRV')
- command, result, returncode = runApplication(edpAppDataList)
+ command, result, returncode = runApplication(edpAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for Simulation
- simAppDataList = [
+ simAppDataList = [ # noqa: N806
f'"{simAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -414,12 +414,12 @@ def main(run_type, inputFile, applicationsRegistry):
if simAppExe.endswith('.py'):
simAppDataList.insert(0, 'python')
- for key in simAppData.keys():
+ for key in simAppData.keys(): # noqa: SIM118
simAppDataList.append('--' + key)
simAppDataList.append('' + simAppData.get(key))
for item in simAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
simAppDataList.append('--getRV')
@@ -428,13 +428,13 @@ def main(run_type, inputFile, applicationsRegistry):
else:
simAppDataList[0] = simAppExeLocal
- command, result, returncode = runApplication(simAppDataList)
+ command, result, returncode = runApplication(simAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# perform the simulation
driverFILE.close()
- uqAppDataList = [
+ uqAppDataList = [ # noqa: N806
f'"{uqAppExeLocal}"',
'--filenameBIM',
bimFILE,
@@ -457,7 +457,7 @@ def main(run_type, inputFile, applicationsRegistry):
uqAppDataList.append('--runType')
uqAppDataList.append(run_type)
- for key in uqAppData.keys():
+ for key in uqAppData.keys(): # noqa: SIM118
uqAppDataList.append('--' + key)
value = uqAppData.get(key)
if isinstance(value, string_types):
@@ -465,50 +465,50 @@ def main(run_type, inputFile, applicationsRegistry):
else:
uqAppDataList.append('' + str(value))
- if run_type == 'run' or run_type == 'set_up' or run_type == 'runningRemote':
- workflow_log('Running Simulation...')
- workflow_log(' '.join(uqAppDataList))
- command, result, returncode = runApplication(uqAppDataList)
+ if run_type == 'run' or run_type == 'set_up' or run_type == 'runningRemote': # noqa: PLR1714
+ workflow_log('Running Simulation...') # noqa: F405
+ workflow_log(' '.join(uqAppDataList)) # noqa: F405
+ command, result, returncode = runApplication(uqAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- workflow_log('Simulation ended...')
+ workflow_log('Simulation ended...') # noqa: F405
else:
- workflow_log('Setup run only. No simulation performed.')
+ workflow_log('Setup run only. No simulation performed.') # noqa: F405
- except WorkFlowInputError as e:
- print('workflow error: %s' % e.value)
- workflow_log('workflow error: %s' % e.value)
- workflow_log(divider)
- exit(1)
+ except WorkFlowInputError as e: # noqa: F405
+ print('workflow error: %s' % e.value) # noqa: T201, UP031
+ workflow_log('workflow error: %s' % e.value) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
+ exit(1) # noqa: PLR1722
# unhandled exceptions are handled here
except Exception:
- print('workflow error: ', sys.exc_info()[0])
- workflow_log('unhandled exception... exiting')
+ print('workflow error: ', sys.exc_info()[0]) # noqa: T201
+ workflow_log('unhandled exception... exiting') # noqa: F405
raise
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
run_type = sys.argv[1]
- inputFile = sys.argv[2]
- applicationsRegistry = sys.argv[3]
+ inputFile = sys.argv[2] # noqa: N816
+ applicationsRegistry = sys.argv[3] # noqa: N816
main(run_type, inputFile, applicationsRegistry)
- workflow_log_file = 'workflow-log-%s.txt' % (
+ workflow_log_file = 'workflow-log-%s.txt' % ( # noqa: UP031
strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
)
- log_filehandle = open(workflow_log_file, 'w')
+ log_filehandle = open(workflow_log_file, 'w') # noqa: SIM115, PTH123
- print(type(log_filehandle))
+ print(type(log_filehandle)) # noqa: T201
print(divider, file=log_filehandle)
print('Start of Log', file=log_filehandle)
print(divider, file=log_filehandle)
@@ -516,13 +516,13 @@ def main(run_type, inputFile, applicationsRegistry):
# nb: log_output is a global variable, defined at the top of this script.
for result in log_output:
print(divider, file=log_filehandle)
- print('command line:\n%s\n' % result[0], file=log_filehandle)
+ print('command line:\n%s\n' % result[0], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
- print('output from process:\n%s\n' % result[1], file=log_filehandle)
+ print('output from process:\n%s\n' % result[1], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
print('End of Log', file=log_filehandle)
print(divider, file=log_filehandle)
- workflow_log('Log file: %s' % workflow_log_file)
- workflow_log('End of run.')
+ workflow_log('Log file: %s' % workflow_log_file) # noqa: F405, UP031
+ workflow_log('End of run.') # noqa: F405
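The EM101/TRY003 pairs above suppress ruff's preference for keeping message literals out of raise statements. A sketch of the rule-clean pattern; WorkFlowInputError is redefined locally here only because the real one arrives via the star import from WorkflowUtils:

class WorkFlowInputError(Exception):
    pass

def require(data, key):
    if key not in data:
        # EM101: bind the literal to a name instead of raising it inline
        msg = f'Need a {key} Entry'
        raise WorkFlowInputError(msg)
    return data[key]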
diff --git a/modules/Workflow/GMT.py b/modules/Workflow/GMT.py
index e34ca1ad3..0f9e9e198 100755
--- a/modules/Workflow/GMT.py
+++ b/modules/Workflow/GMT.py
@@ -1,11 +1,11 @@
-# written: fmk, adamzs
+# written: fmk, adamzs # noqa: EXE002, INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -17,78 +17,78 @@
divider = '#' * 80
log_output = []
-from WorkflowUtils import *
+from WorkflowUtils import * # noqa: E402, F403
-def main(run_type, inputFile, applicationsRegistry):
+def main(run_type, inputFile, applicationsRegistry): # noqa: C901, N803, D103, PLR0912, PLR0915
# the whole workflow is wrapped within a 'try' block.
# a number of exceptions (files missing, explicit application failures, etc.) are
# handled explicitly to aid the user.
# But unhandled exceptions cause the workflow to stop with an error, handled in the
# exception block at the very bottom of this main() function
try:
- workflow_log(divider)
- workflow_log('Start of run')
- workflow_log(divider)
- workflow_log('workflow input file: %s' % inputFile)
- workflow_log('application registry file: %s' % applicationsRegistry)
- workflow_log('runtype: %s' % run_type)
- workflow_log(divider)
+ workflow_log(divider) # noqa: F405
+ workflow_log('Start of run') # noqa: F405
+ workflow_log(divider) # noqa: F405
+ workflow_log('workflow input file: %s' % inputFile) # noqa: F405, UP031
+ workflow_log('application registry file: %s' % applicationsRegistry) # noqa: F405, UP031
+ workflow_log('runtype: %s' % run_type) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
#
# first we parse the applications registry to load all possible applications
# - for each application type we place in a dictionary key being name, value containing path to executable
#
- with open(applicationsRegistry) as data_file:
- registryData = json.load(data_file)
+ with open(applicationsRegistry) as data_file: # noqa: PTH123
+ registryData = json.load(data_file) # noqa: N806
# convert all relative paths to full paths
- A = 'Applications'
- Applications = dict()
- appList = 'Event Modeling EDP Simulation UQ'.split(' ')
- appList = [a + A for a in appList]
+ A = 'Applications' # noqa: N806
+ Applications = dict() # noqa: C408, N806
+ appList = 'Event Modeling EDP Simulation UQ'.split(' ') # noqa: N806
+ appList = [a + A for a in appList] # noqa: N806
for app_type in appList:
if app_type in registryData:
- xApplicationData = registryData[app_type]
- applicationsData = xApplicationData['Applications']
+ xApplicationData = registryData[app_type] # noqa: N806
+ applicationsData = xApplicationData['Applications'] # noqa: N806
for app in applicationsData:
- appName = app['Name']
- appExe = app['ExecutablePath']
+ appName = app['Name'] # noqa: N806
+ appExe = app['ExecutablePath'] # noqa: N806
if app_type not in Applications:
- Applications[app_type] = dict()
+ Applications[app_type] = dict() # noqa: C408
Applications[app_type][appName] = appExe
#
# open input file, and parse json into data
#
- with open(inputFile) as data_file:
+ with open(inputFile) as data_file: # noqa: PTH123
data = json.load(data_file)
# convert all relative paths to full paths
# relative2fullpath(data)
if 'runDir' in data:
- runDIR = data['runDir']
+ runDIR = data['runDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a runDir Entry')
+ raise WorkFlowInputError('Need a runDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'remoteAppDir' in data:
- remoteAppDir = data['remoteAppDir']
+ remoteAppDir = data['remoteAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a remoteAppDir Entry')
+ raise WorkFlowInputError('Need a remoteAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'localAppDir' in data:
- localAppDir = data['localAppDir']
+ localAppDir = data['localAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a localAppDir Entry')
+ raise WorkFlowInputError('Need a localAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
#
# before running chdir to templatedir
#
- workflow_log('run Directory: %s' % runDIR)
+ workflow_log('run Directory: %s' % runDIR) # noqa: F405, UP031
os.chdir(runDIR)
os.chdir('templatedir')
@@ -100,7 +100,7 @@ def main(run_type, inputFile, applicationsRegistry):
if 'Applications' in data:
available_apps = data['Applications']
else:
- raise WorkFlowInputError('Need an Applications Entry')
+ raise WorkFlowInputError('Need an Applications Entry') # noqa: EM101, F405, TRY003, TRY301
#
# get events, for each the application and its data .. FOR NOW 1 EVENT
@@ -111,130 +111,130 @@ def main(run_type, inputFile, applicationsRegistry):
for event in events:
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if eventClassification == 'Earthquake':
if 'Application' in event:
- eventApplication = event['Application']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['Application'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806, F841
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications['EventApplications'].get(
+ eventAppExe = Applications['EventApplications'].get( # noqa: N806
eventApplication
)
- workflow_log(remoteAppDir)
- workflow_log(eventAppExe)
- eventAppExeLocal = posixpath.join(
+ workflow_log(remoteAppDir) # noqa: F405
+ workflow_log(eventAppExe) # noqa: F405
+ eventAppExeLocal = posixpath.join( # noqa: N806
localAppDir, eventAppExe
)
- eventAppExeRemote = posixpath.join(
+ eventAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, eventAppExe
)
- workflow_log(eventAppExeRemote)
+ workflow_log(eventAppExeRemote) # noqa: F405
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'Event classification must be Earthquake, not %s'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event classification must be Earthquake, not %s' # noqa: UP031
% eventClassification
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an Events Entry in Applications')
+ raise WorkFlowInputError('Need an Events Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'EDP' in available_apps:
- edpApp = available_apps['EDP']
+ edpApp = available_apps['EDP'] # noqa: N806
if 'Application' in edpApp:
- edpApplication = edpApp['Application']
+ edpApplication = edpApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- edpAppData = edpApp['ApplicationData']
- if edpApplication in Applications['EDPApplications'].keys():
- edpAppExe = Applications['EDPApplications'].get(edpApplication)
- edpAppExeLocal = posixpath.join(localAppDir, edpAppExe)
- edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe)
+ edpAppData = edpApp['ApplicationData'] # noqa: N806
+ if edpApplication in Applications['EDPApplications'].keys(): # noqa: SIM118
+ edpAppExe = Applications['EDPApplications'].get(edpApplication) # noqa: N806
+ edpAppExeLocal = posixpath.join(localAppDir, edpAppExe) # noqa: N806
+ edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'EDP application {edpApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'EDP application {edpApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need an EDPApplication in EDP data')
+ raise WorkFlowInputError('Need an EDPApplication in EDP data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an EDP Entry in Applications')
+ raise WorkFlowInputError('Need an EDP Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'Simulation' in available_apps:
- simulationApp = available_apps['Simulation']
+ simulationApp = available_apps['Simulation'] # noqa: N806
if 'Application' in simulationApp:
- simulationApplication = simulationApp['Application']
+ simulationApplication = simulationApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- simAppData = simulationApp['ApplicationData']
+ simAppData = simulationApp['ApplicationData'] # noqa: N806
if (
- simulationApplication
+ simulationApplication # noqa: SIM118
in Applications['SimulationApplications'].keys()
):
- simAppExe = Applications['SimulationApplications'].get(
+ simAppExe = Applications['SimulationApplications'].get( # noqa: N806
simulationApplication
)
- simAppExeLocal = posixpath.join(localAppDir, simAppExe)
- simAppExeRemote = posixpath.join(remoteAppDir, simAppExe)
+ simAppExeLocal = posixpath.join(localAppDir, simAppExe) # noqa: N806
+ simAppExeRemote = posixpath.join(remoteAppDir, simAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'Simulation application {simulationApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'Simulation application {simulationApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError(
- 'Need an SimulationApplication in Simulation data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an SimulationApplication in Simulation data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'UQ' in available_apps:
- uqApp = available_apps['UQ']
+ uqApp = available_apps['UQ'] # noqa: N806
if 'Application' in uqApp:
- uqApplication = uqApp['Application']
+ uqApplication = uqApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- uqAppData = uqApp['ApplicationData']
- if uqApplication in Applications['UQApplications'].keys():
- uqAppExe = Applications['UQApplications'].get(uqApplication)
- uqAppExeLocal = posixpath.join(localAppDir, uqAppExe)
- uqAppExeRemote = posixpath.join(localAppDir, uqAppExe)
+ uqAppData = uqApp['ApplicationData'] # noqa: N806
+ if uqApplication in Applications['UQApplications'].keys(): # noqa: SIM118
+ uqAppExe = Applications['UQApplications'].get(uqApplication) # noqa: N806
+ uqAppExeLocal = posixpath.join(localAppDir, uqAppExe) # noqa: N806
+ uqAppExeRemote = posixpath.join(localAppDir, uqAppExe) # noqa: N806, F841
else:
- raise WorkFlowInputError(
- f'UQ application {uqApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'UQ application {uqApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need a UQApplication in UQ data')
+ raise WorkFlowInputError('Need a UQApplication in UQ data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
- workflow_log('SUCCESS: Parsed workflow input')
- workflow_log(divider)
+ workflow_log('SUCCESS: Parsed workflow input') # noqa: F405
+ workflow_log(divider) # noqa: F405
#
# now invoke the applications
@@ -247,16 +247,16 @@ def main(run_type, inputFile, applicationsRegistry):
# - perform Simulation
# - getDL
- inputFILE = 'dakota.json'
- eventFILE = 'EVENT.json'
- edpFILE = 'EDP.json'
- driverFile = 'driver'
+ inputFILE = 'dakota.json' # noqa: N806
+ eventFILE = 'EVENT.json' # noqa: N806
+ edpFILE = 'EDP.json' # noqa: N806
+ driverFile = 'driver' # noqa: N806
# open driver file & write building app (minus the --getRV) to it
- driverFILE = open(driverFile, 'w')
+ driverFILE = open(driverFile, 'w') # noqa: SIM115, PTH123, N806
# get RV for event
- eventAppDataList = [
+ eventAppDataList = [ # noqa: N806
f'"{eventAppExeRemote}"',
'--filenameBIM',
inputFILE,
@@ -266,13 +266,13 @@ def main(run_type, inputFile, applicationsRegistry):
if eventAppExe.endswith('.py'):
eventAppDataList.insert(0, 'python')
- for key in eventAppData.keys():
+ for key in eventAppData.keys(): # noqa: SIM118
eventAppDataList.append('--' + key)
value = eventAppData.get(key)
eventAppDataList.append('' + value)
for item in eventAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
eventAppDataList.append('--getRV')
@@ -281,10 +281,10 @@ def main(run_type, inputFile, applicationsRegistry):
else:
eventAppDataList[0] = '' + eventAppExeLocal
- command, result, returncode = runApplication(eventAppDataList)
+ command, result, returncode = runApplication(eventAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- edpAppDataList = [
+ edpAppDataList = [ # noqa: N806
f'"{edpAppExeRemote}"',
'--filenameBIM',
inputFILE,
@@ -299,12 +299,12 @@ def main(run_type, inputFile, applicationsRegistry):
if edpAppExe.endswith('.py'):
edpAppDataList.insert(0, 'python')
- for key in edpAppData.keys():
+ for key in edpAppData.keys(): # noqa: SIM118
edpAppDataList.append('--' + key)
edpAppDataList.append('' + edpAppData.get(key))
for item in edpAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
if edpAppExe.endswith('.py'):
@@ -313,11 +313,11 @@ def main(run_type, inputFile, applicationsRegistry):
edpAppDataList[0] = edpAppExeLocal
edpAppDataList.append('--getRV')
- command, result, returncode = runApplication(edpAppDataList)
+ command, result, returncode = runApplication(edpAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for Simulation
- simAppDataList = [
+ simAppDataList = [ # noqa: N806
f'"{simAppExeRemote}"',
'--filenameBIM',
inputFILE,
@@ -334,12 +334,12 @@ def main(run_type, inputFile, applicationsRegistry):
if simAppExe.endswith('.py'):
simAppDataList.insert(0, 'python')
- for key in simAppData.keys():
+ for key in simAppData.keys(): # noqa: SIM118
simAppDataList.append('--' + key)
simAppDataList.append('' + simAppData.get(key))
for item in simAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
simAppDataList.append('--getRV')
@@ -348,14 +348,14 @@ def main(run_type, inputFile, applicationsRegistry):
else:
simAppDataList[0] = simAppExeLocal
- command, result, returncode = runApplication(simAppDataList)
+ command, result, returncode = runApplication(simAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# perform the simulation
driverFILE.close()
- print('HELLO')
+ print('HELLO') # noqa: T201
- uqAppDataList = [
+ uqAppDataList = [ # noqa: N806
f'"{uqAppExeLocal}"',
'--filenameBIM',
inputFILE,
@@ -373,7 +373,7 @@ def main(run_type, inputFile, applicationsRegistry):
# uqAppDataList = ['"{}"'.format(uqAppExeLocal), '--filenameBIM', inputFILE, '--filenameEVENT', eventFILE, '--driverFile', driverFile]
- print(uqAppDataList)
+ print(uqAppDataList) # noqa: T201
if uqAppExe.endswith('.py'):
uqAppDataList.insert(0, 'python')
@@ -382,9 +382,9 @@ def main(run_type, inputFile, applicationsRegistry):
uqAppDataList.append('--runType')
uqAppDataList.append(run_type)
- print(uqAppDataList)
+ print(uqAppDataList) # noqa: T201
- for key in uqAppData.keys():
+ for key in uqAppData.keys(): # noqa: SIM118
uqAppDataList.append('--' + key)
value = uqAppData.get(key)
if isinstance(value, string_types):
@@ -392,50 +392,50 @@ def main(run_type, inputFile, applicationsRegistry):
else:
uqAppDataList.append('' + str(value))
- if run_type == 'run' or run_type == 'set_up':
- workflow_log('Running Simulation...')
- workflow_log(' '.join(uqAppDataList))
- command, result, returncode = runApplication(uqAppDataList)
+ if run_type == 'run' or run_type == 'set_up': # noqa: PLR1714
+ workflow_log('Running Simulation...') # noqa: F405
+ workflow_log(' '.join(uqAppDataList)) # noqa: F405
+ command, result, returncode = runApplication(uqAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- workflow_log('Simulation ended...')
+ workflow_log('Simulation ended...') # noqa: F405
else:
- workflow_log('Setup run only. No simulation performed.')
+ workflow_log('Setup run only. No simulation performed.') # noqa: F405
- except WorkFlowInputError as e:
- print('workflow error: %s' % e.value)
- workflow_log('workflow error: %s' % e.value)
- workflow_log(divider)
- exit(1)
+ except WorkFlowInputError as e: # noqa: F405
+ print('workflow error: %s' % e.value) # noqa: T201, UP031
+ workflow_log('workflow error: %s' % e.value) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
+ exit(1) # noqa: PLR1722
# unhandled exceptions are handled here
except Exception:
# print('workflow error: %s' % e.value)
- workflow_log('unhandled exception... exiting')
+ workflow_log('unhandled exception... exiting') # noqa: F405
raise
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
run_type = sys.argv[1]
- inputFile = sys.argv[2]
- applicationsRegistry = sys.argv[3]
+ inputFile = sys.argv[2] # noqa: N816
+ applicationsRegistry = sys.argv[3] # noqa: N816
main(run_type, inputFile, applicationsRegistry)
- workflow_log_file = 'workflow-log-%s.txt' % (
+ workflow_log_file = 'workflow-log-%s.txt' % ( # noqa: UP031
strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
)
- log_filehandle = open(workflow_log_file, 'w')
+ log_filehandle = open(workflow_log_file, 'w') # noqa: SIM115, PTH123
- print(type(log_filehandle))
+ print(type(log_filehandle)) # noqa: T201
print(divider, file=log_filehandle)
print('Start of Log', file=log_filehandle)
print(divider, file=log_filehandle)
@@ -443,13 +443,13 @@ def main(run_type, inputFile, applicationsRegistry):
# nb: log_output is a global variable, defined at the top of this script.
for result in log_output:
print(divider, file=log_filehandle)
- print('command line:\n%s\n' % result[0], file=log_filehandle)
+ print('command line:\n%s\n' % result[0], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
- print('output from process:\n%s\n' % result[1], file=log_filehandle)
+ print('output from process:\n%s\n' % result[1], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
print('End of Log', file=log_filehandle)
print(divider, file=log_filehandle)
- workflow_log('Log file: %s' % workflow_log_file)
- workflow_log('End of run.')
+ workflow_log('Log file: %s' % workflow_log_file) # noqa: F405, UP031
+ workflow_log('End of run.') # noqa: F405
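Note on the UP031 suppressions above: they mark printf-style '%' formatting that
ruff would otherwise ask to modernize. If these call sites were ever migrated
instead of suppressed, the f-string equivalents would look roughly like the
sketch below (illustrative only, not part of the patch; workflow_log,
log_output, and log_filehandle are the names used in the file above):

    from time import gmtime, strftime

    # f-string forms of the suppressed UP031 sites
    workflow_log_file = f"workflow-log-{strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())}.txt"
    for result in log_output:
        print(f'command line:\n{result[0]}\n', file=log_filehandle)
        print(f'output from process:\n{result[1]}\n', file=log_filehandle)
    workflow_log(f'Log file: {workflow_log_file}')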
diff --git a/modules/Workflow/MultiModelApplication.py b/modules/Workflow/MultiModelApplication.py
index 4e8f2b79b..3d8461f4d 100755
--- a/modules/Workflow/MultiModelApplication.py
+++ b/modules/Workflow/MultiModelApplication.py
@@ -1,4 +1,4 @@
-#
+# # noqa: EXE002, INP001, D100
# Copyright (c) 2019 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications.
@@ -40,7 +40,7 @@
import os
import sys
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
from whale.main import (
_parse_app_registry,
@@ -49,64 +49,64 @@
)
-def main(
- inputFile,
- appKey,
- getRV,
- samFile,
- evtFile,
- edpFile,
- simFile,
- registryFile,
- appDir,
+def main( # noqa: C901, D103
+ inputFile, # noqa: N803
+ appKey, # noqa: N803
+ getRV, # noqa: N803
+ samFile, # noqa: ARG001, N803
+ evtFile, # noqa: ARG001, N803
+ edpFile, # noqa: ARG001, N803
+ simFile, # noqa: ARG001, N803
+ registryFile, # noqa: N803
+ appDir, # noqa: N803
):
#
# get some dir paths, load input file and get data for app, appKey
#
- inputDir = os.path.dirname(inputFile)
- inputFileName = os.path.basename(inputFile)
+ inputDir = os.path.dirname(inputFile) # noqa: PTH120, N806
+ inputFileName = os.path.basename(inputFile) # noqa: PTH119, N806
if inputDir != '':
os.chdir(inputDir)
- with open(inputFileName) as f:
+ with open(inputFileName) as f: # noqa: PTH123
inputs = json.load(f)
- if 'referenceDir' in inputs:
+ if 'referenceDir' in inputs: # noqa: SIM401
reference_dir = inputs['referenceDir']
else:
reference_dir = inputDir
- appData = {}
+ appData = {} # noqa: N806
if appKey in inputs:
- appData = inputs[appKey]
+ appData = inputs[appKey] # noqa: N806
else:
- raise KeyError(
- f'No data for "{appKey}" application in the input file "{inputFile}"'
+ raise KeyError( # noqa: TRY003
+ f'No data for "{appKey}" application in the input file "{inputFile}"' # noqa: EM102
)
- eventApp = False
+ eventApp = False # noqa: N806
if appKey == 'Events':
- eventApp = True
- appData = appData[0]
+ eventApp = True # noqa: N806, F841
+ appData = appData[0] # noqa: N806
- print('appKEY: ', appKey)
- print('appDATA: ', appData)
- print('HELLO ')
+ print('appKEY: ', appKey) # noqa: T201
+ print('appDATA: ', appData) # noqa: T201
+ print('HELLO ') # noqa: T201
if 'models' not in appData:
- print('NO models in: ', appData)
- raise KeyError(
- f'"models" not defined in data for "{appKey}" application in the input file "{inputFile}'
+ print('NO models in: ', appData) # noqa: T201
+ raise KeyError( # noqa: TRY003
+            f'"models" not defined in data for "{appKey}" application in the input file "{inputFile}"' # noqa: EM102
)
- if len(appData['models']) < 2:
- raise RuntimeError(
- f'At least two models must be provided if the multimodel {appKey} application is used'
+ if len(appData['models']) < 2: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ f'At least two models must be provided if the multimodel {appKey} application is used' # noqa: EM102
)
models = appData['models']
- modelToRun = appData['modelToRun']
+ modelToRun = appData['modelToRun'] # noqa: N806
if not getRV:
#
@@ -114,37 +114,37 @@ def main(
#
if isinstance(modelToRun, str):
- rvName = 'MultiModel-' + appKey
+ rvName = 'MultiModel-' + appKey # noqa: N806
# if not here, try opening params.in and getting var from there
- with open('params.in') as params:
+ with open('params.in') as params: # noqa: PTH123
# Read the file line by line
for line in params:
values = line.strip().split()
- print(values)
+ print(values) # noqa: T201
if values[0] == rvName:
- modelToRun = values[1]
+ modelToRun = values[1] # noqa: N806
- modelToRun = int(float(modelToRun))
+ modelToRun = int(float(modelToRun)) # noqa: N806
- appsInMultiModel = []
- appDataInMultiModel = []
- appRunDataInMultiModel = []
+ appsInMultiModel = [] # noqa: N806
+ appDataInMultiModel = [] # noqa: N806
+ appRunDataInMultiModel = [] # noqa: N806
beliefs = []
- sumBeliefs = 0
+ sumBeliefs = 0 # noqa: N806
- numModels = 0
+ numModels = 0 # noqa: N806
for model in models:
belief = model['belief']
- appName = model['Application']
- appData = model['ApplicationData']
- appRunData = model['data']
+ appName = model['Application'] # noqa: N806
+ appData = model['ApplicationData'] # noqa: N806
+ appRunData = model['data'] # noqa: N806
beliefs.append(belief)
- sumBeliefs = sumBeliefs + belief
+ sumBeliefs = sumBeliefs + belief # noqa: N806
appsInMultiModel.append(appName)
appDataInMultiModel.append(appData)
appRunDataInMultiModel.append(appRunData)
- numModels = numModels + 1
+ numModels = numModels + 1 # noqa: N806
for i in range(numModels):
beliefs[i] = beliefs[i] / sumBeliefs
@@ -155,16 +155,16 @@ def main(
#
if appKey == 'Events':
- appTypes = ['Event']
+ appTypes = ['Event'] # noqa: N806
else:
- appTypes = [appKey]
+ appTypes = [appKey] # noqa: N806
- parsedRegistry = _parse_app_registry(registryFile, appTypes)
+ parsedRegistry = _parse_app_registry(registryFile, appTypes) # noqa: N806
if appKey == 'Events':
- appsRegistry = parsedRegistry[0]['Event']
+ appsRegistry = parsedRegistry[0]['Event'] # noqa: N806
else:
- appsRegistry = parsedRegistry[0][appKey]
+ appsRegistry = parsedRegistry[0][appKey] # noqa: N806
#
# now we run the application
@@ -173,7 +173,7 @@ def main(
#
if getRV:
- print('MultiModel - getRV')
+ print('MultiModel - getRV') # noqa: T201
#
# launch each application with getRV and add any new RandomVariable
@@ -182,8 +182,8 @@ def main(
#
for i in range(numModels):
- appName = appsInMultiModel[i]
- print('appsRegistry:', appsRegistry)
+ appName = appsInMultiModel[i] # noqa: N806
+ print('appsRegistry:', appsRegistry) # noqa: T201
application = appsRegistry[appName]
application.set_pref(appDataInMultiModel[i], reference_dir)
@@ -200,12 +200,12 @@ def main(
# for NOW, add RV to input file
#
- randomVariables = inputs['randomVariables']
- rvName = 'MultiModel-' + appKey
- rvValue = 'RV.MultiModel-' + appKey
+ randomVariables = inputs['randomVariables'] # noqa: N806
+ rvName = 'MultiModel-' + appKey # noqa: N806
+ rvValue = 'RV.MultiModel-' + appKey # noqa: N806
# nrv = len(randomVariables)
- thisRV = {
+ thisRV = { # noqa: N806
'distribution': 'Discrete',
'inputType': 'Parameters',
'name': rvName,
@@ -229,10 +229,10 @@ def main(
# newCorrMat[0:nrv,0:nrv] = corrMat
# inputs['correlationMatrix'] = newCorrMat.flatten().tolist()
- with open(inputFile, 'w') as outfile:
+ with open(inputFile, 'w') as outfile: # noqa: PTH123
json.dump(inputs, outfile)
- print('UPDATING INPUT FILE:', inputFile)
+ print('UPDATING INPUT FILE:', inputFile) # noqa: T201
#
# for now just run the last model (works in sWHALE for all apps that don't create RV, i.e. events)
@@ -240,31 +240,31 @@ def main(
# create input file for application
- tmpFile = 'MultiModel.' + appKey + '.json'
+ tmpFile = 'MultiModel.' + appKey + '.json' # noqa: N806
inputs[appKey] = appRunDataInMultiModel[numModels - 1]
- with open(tmpFile, 'w') as outfile:
+ with open(tmpFile, 'w') as outfile: # noqa: PTH123
json.dump(inputs, outfile)
# run the application
asset_command_list = application.get_command_list(appDir)
- indexInputFile = asset_command_list.index('--filenameAIM') + 1
+ indexInputFile = asset_command_list.index('--filenameAIM') + 1 # noqa: N806
asset_command_list[indexInputFile] = tmpFile
asset_command_list.append('--getRV')
command = create_command(asset_command_list)
run_command(command)
- print('RUNNING --getRV:', command)
+ print('RUNNING --getRV:', command) # noqa: T201
else:
- print('MultiModel - run')
- modelToRun = modelToRun - 1
+ print('MultiModel - run') # noqa: T201
+ modelToRun = modelToRun - 1 # noqa: N806
# get app data given model
- appName = appsInMultiModel[modelToRun]
+ appName = appsInMultiModel[modelToRun] # noqa: N806
application = appsRegistry[appName]
application.set_pref(appDataInMultiModel[modelToRun], reference_dir)
# create modified input file for app
- tmpFile = 'MultiModel.' + appKey + '.json'
+ tmpFile = 'MultiModel.' + appKey + '.json' # noqa: N806
# if appKey == "Events":
# inputs["Events"][0]=appRunDataInMultiModel[modelToRun]
@@ -273,22 +273,22 @@ def main(
# inputs[appKey] = appRunDataInMultiModel[modelToRun]
inputs[appKey] = appRunDataInMultiModel[modelToRun]
- print('model to run:', modelToRun)
+ print('model to run:', modelToRun) # noqa: T201
- with open(tmpFile, 'w') as outfile:
+ with open(tmpFile, 'w') as outfile: # noqa: PTH123
json.dump(inputs, outfile)
- print('INPUTS', inputs)
+ print('INPUTS', inputs) # noqa: T201
# run application
asset_command_list = application.get_command_list(appDir)
- indexInputFile = asset_command_list.index('--filenameAIM') + 1
+ indexInputFile = asset_command_list.index('--filenameAIM') + 1 # noqa: N806
asset_command_list[indexInputFile] = tmpFile
command = create_command(asset_command_list)
run_command(command)
- print('RUNNING:', command)
+ print('RUNNING:', command) # noqa: T201
- print('Finished MultiModelApplication')
+ print('Finished MultiModelApplication') # noqa: T201
if __name__ == '__main__':
@@ -307,8 +307,8 @@ def main(
parser.add_argument('--appKey', default=None)
parser.add_argument(
'--registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -316,8 +316,8 @@ def main(
parser.add_argument(
'-a',
'--appDir',
- default=os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ default=os.path.dirname( # noqa: PTH120
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
),
help='Absolute path to the local application directory.',
)
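Note on the PTH suppressions in MultiModelApplication.py: PTH100, PTH118, and
PTH120 flag os.path.abspath, os.path.join, and os.path.dirname calls that the
flake8-use-pathlib rules would rewrite. A pathlib version of the two argparse
defaults above would look roughly like this (a sketch under the same layout
assumptions; the patch keeps os.path and silences the rules instead):

    from pathlib import Path

    # --registry default: WorkflowApplications.json next to this script
    registry_default = Path(__file__).resolve().parent / 'WorkflowApplications.json'
    # --appDir default: two directory levels above this script
    app_dir_default = Path(__file__).resolve().parents[2]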
diff --git a/modules/Workflow/MultiModelDriver.py b/modules/Workflow/MultiModelDriver.py
index 4e7d47322..f976b1c9e 100644
--- a/modules/Workflow/MultiModelDriver.py
+++ b/modules/Workflow/MultiModelDriver.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications.
@@ -41,7 +41,7 @@
import sys
from copy import deepcopy
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
from whale.main import (
_parse_app_registry,
@@ -50,86 +50,86 @@
)
-def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
+def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType): # noqa: C901, N803, D103
#
# get some dir paths, load input file and get data for app, appKey
#
- inputDir = os.path.dirname(inputFile)
- inputFileName = os.path.basename(inputFile)
+ inputDir = os.path.dirname(inputFile) # noqa: PTH120, N806
+ inputFileName = os.path.basename(inputFile) # noqa: PTH119, N806
if inputDir != '':
os.chdir(inputDir)
- with open(inputFileName) as f:
+ with open(inputFileName) as f: # noqa: PTH123
inputs = json.load(f)
- localAppDir = inputs['localAppDir']
- remoteAppDir = inputs['remoteAppDir']
+ localAppDir = inputs['localAppDir'] # noqa: N806
+ remoteAppDir = inputs['remoteAppDir'] # noqa: N806
- appDir = localAppDir
+ appDir = localAppDir # noqa: N806
if runType == 'runningRemote':
- appDir = remoteAppDir
+ appDir = remoteAppDir # noqa: N806
- if 'referenceDir' in inputs:
+ if 'referenceDir' in inputs: # noqa: SIM401
reference_dir = inputs['referenceDir']
else:
reference_dir = inputDir
- appData = {}
+ appData = {} # noqa: N806
if appKey in inputs:
- appData = inputs[appKey]
+ appData = inputs[appKey] # noqa: N806
if 'models' not in appData:
- print('NO models in: ', appData)
- raise KeyError(
- f'"models" not defined in data for "{appKey}" application in the input file "{inputFile}'
+ print('NO models in: ', appData) # noqa: T201
+ raise KeyError( # noqa: TRY003
+            f'"models" not defined in data for "{appKey}" application in the input file "{inputFile}"' # noqa: EM102
)
- if len(appData['models']) < 2:
- raise RuntimeError(
- f'At least two models must be provided if the multimodel {appKey} application is used'
+ if len(appData['models']) < 2: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ f'At least two models must be provided if the multimodel {appKey} application is used' # noqa: EM102
)
models = appData['models']
- modelToRun = appData['modelToRun']
+ modelToRun = appData['modelToRun'] # noqa: N806
- appsInMultiModel = []
- appDataInMultiModel = []
- appRunDataInMultiModel = []
+ appsInMultiModel = [] # noqa: N806
+ appDataInMultiModel = [] # noqa: N806
+ appRunDataInMultiModel = [] # noqa: N806
beliefs = []
- sumBeliefs = 0
+ sumBeliefs = 0 # noqa: N806
- numModels = 0
+ numModels = 0 # noqa: N806
for model in models:
belief = model['belief']
- appName = model['Application']
- appData = model['ApplicationData']
- appRunData = model['data']
+ appName = model['Application'] # noqa: N806
+ appData = model['ApplicationData'] # noqa: N806
+ appRunData = model['data'] # noqa: N806
beliefs.append(belief)
- sumBeliefs = sumBeliefs + belief
+ sumBeliefs = sumBeliefs + belief # noqa: N806
appsInMultiModel.append(appName)
appDataInMultiModel.append(appData)
appRunDataInMultiModel.append(appRunData)
- numModels = numModels + 1
+ numModels = numModels + 1 # noqa: N806
for i in range(numModels):
beliefs[i] = beliefs[i] / sumBeliefs
- appTypes = [appKey]
+ appTypes = [appKey] # noqa: N806
- parsedRegistry = _parse_app_registry(registryFile, appTypes)
- appsRegistry = parsedRegistry[0][appKey]
+ parsedRegistry = _parse_app_registry(registryFile, appTypes) # noqa: N806
+ appsRegistry = parsedRegistry[0][appKey] # noqa: N806
#
# add RV to input file
#
- randomVariables = inputs['randomVariables']
- rvName = 'MultiModel-' + appKey
- rvValue = 'RV.MultiModel-' + appKey
+ randomVariables = inputs['randomVariables'] # noqa: N806
+ rvName = 'MultiModel-' + appKey # noqa: N806
+ rvValue = 'RV.MultiModel-' + appKey # noqa: N806
- thisRV = {
+ thisRV = { # noqa: N806
'distribution': 'Discrete',
'inputType': 'Parameters',
'name': rvName,
@@ -142,28 +142,28 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
}
randomVariables.append(thisRV)
- with open(inputFile, 'w') as outfile:
+ with open(inputFile, 'w') as outfile: # noqa: PTH123
json.dump(inputs, outfile)
#
# create driver file that runs the right driver
#
- paramsFileName = 'params.in'
- multiModelString = 'MultiModel'
- exeFileName = 'runMultiModelDriver'
+ paramsFileName = 'params.in' # noqa: N806
+ multiModelString = 'MultiModel' # noqa: N806
+ exeFileName = 'runMultiModelDriver' # noqa: N806
if osType == 'Windows' and runType == 'runningLocal':
- driverFileBat = driverFile + '.bat'
- exeFileName = exeFileName + '.exe'
- with open(driverFileBat, 'wb') as f:
+ driverFileBat = driverFile + '.bat' # noqa: N806
+ exeFileName = exeFileName + '.exe' # noqa: N806
+ with open(driverFileBat, 'wb') as f: # noqa: PTH123
f.write(
bytes(
- os.path.join(appDir, 'applications', 'Workflow', exeFileName)
+ os.path.join(appDir, 'applications', 'Workflow', exeFileName) # noqa: PTH118
+ f' {paramsFileName} {driverFileBat} {multiModelString}',
'UTF-8',
)
)
elif osType == 'Windows' and runType == 'runningRemote':
- with open(driverFile, 'wb') as f:
+ with open(driverFile, 'wb') as f: # noqa: PTH123
f.write(
appDir
+ '/applications/Workflow/'
@@ -172,21 +172,21 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
'UTF-8',
)
else:
- with open(driverFile, 'wb') as f:
+ with open(driverFile, 'wb') as f: # noqa: PTH123
f.write(
bytes(
- os.path.join(appDir, 'applications', 'Workflow', exeFileName)
+ os.path.join(appDir, 'applications', 'Workflow', exeFileName) # noqa: PTH118
+ f' {paramsFileName} {driverFile} {multiModelString}',
'UTF-8',
)
)
- for modelToRun in range(numModels):
+ for modelToRun in range(numModels): # noqa: N806
#
# run the app to create the driver file for each model
#
- appName = appsInMultiModel[modelToRun]
+ appName = appsInMultiModel[modelToRun] # noqa: N806
application = appsRegistry[appName]
application.set_pref(appDataInMultiModel[modelToRun], reference_dir)
@@ -194,17 +194,17 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
# create input file for application
#
- modelInputFile = f'MultiModel_{modelToRun + 1}_' + inputFile
- modelDriverFile = f'MultiModel_{modelToRun + 1}_' + driverFile
+ modelInputFile = f'MultiModel_{modelToRun + 1}_' + inputFile # noqa: N806
+ modelDriverFile = f'MultiModel_{modelToRun + 1}_' + driverFile # noqa: N806
- inputsTmp = deepcopy(inputs)
+ inputsTmp = deepcopy(inputs) # noqa: N806
inputsTmp[appKey] = appRunDataInMultiModel[modelToRun]
inputsTmp['Applications'][appKey] = {
'Application': appsInMultiModel[modelToRun],
'ApplicationData': appDataInMultiModel[modelToRun],
}
- with open(modelInputFile, 'w') as outfile:
+ with open(modelInputFile, 'w') as outfile: # noqa: PTH123
json.dump(inputsTmp, outfile)
#
@@ -212,9 +212,9 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
#
asset_command_list = application.get_command_list(localAppDir)
- indexInputFile = asset_command_list.index('--workflowInput') + 1
+ indexInputFile = asset_command_list.index('--workflowInput') + 1 # noqa: N806
asset_command_list[indexInputFile] = modelInputFile
- indexInputFile = asset_command_list.index('--driverFile') + 1
+ indexInputFile = asset_command_list.index('--driverFile') + 1 # noqa: N806
asset_command_list[indexInputFile] = modelDriverFile
asset_command_list.append('--osType')
asset_command_list.append(osType)
@@ -243,8 +243,8 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
parser.add_argument('--osType', default=None)
parser.add_argument(
'--registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -253,8 +253,8 @@ def main(inputFile, driverFile, appKey, registryFile, appDir, runType, osType):
parser.add_argument(
'-a',
'--appDir',
- default=os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ default=os.path.dirname( # noqa: PTH120
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
),
help='Absolute path to the local application directory.',
)
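Note on the belief bookkeeping in MultiModelDriver.py (and its twin in
MultiModelApplication.py): the loop accumulates sumBeliefs and numModels and
then divides in a second pass, which is why ruff only raises naming (N806)
complaints about it. Functionally it is a plain normalization, equivalent to
this sketch:

    # equivalent normalization of the per-model beliefs
    total = sum(model['belief'] for model in models)
    beliefs = [model['belief'] / total for model in models]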
diff --git a/modules/Workflow/PBE.py b/modules/Workflow/PBE.py
index 88f8dec36..7123d3fbf 100644
--- a/modules/Workflow/PBE.py
+++ b/modules/Workflow/PBE.py
@@ -1,11 +1,11 @@
-# written: fmk, adamzs
+# written: fmk, adamzs # noqa: INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -17,78 +17,78 @@
divider = '#' * 80
log_output = []
-from WorkflowUtils import *
+from WorkflowUtils import * # noqa: E402, F403
-def main(run_type, inputFile, applicationsRegistry):
+def main(run_type, inputFile, applicationsRegistry): # noqa: C901, N803, D103, PLR0912, PLR0915
# the whole workflow is wrapped within a 'try' block.
# a number of exceptions (files missing, explicit application failures, etc.) are
# handled explicitly to aid the user.
    # But unhandled exceptions cause the workflow to stop with an error, handled in the
    # exception block at the very bottom of this main() function
try:
- workflow_log(divider)
- workflow_log('Start of run')
- workflow_log(divider)
- workflow_log('workflow input file: %s' % inputFile)
- workflow_log('application registry file: %s' % applicationsRegistry)
- workflow_log('runtype: %s' % run_type)
- workflow_log(divider)
+ workflow_log(divider) # noqa: F405
+ workflow_log('Start of run') # noqa: F405
+ workflow_log(divider) # noqa: F405
+ workflow_log('workflow input file: %s' % inputFile) # noqa: F405, UP031
+ workflow_log('application registry file: %s' % applicationsRegistry) # noqa: F405, UP031
+ workflow_log('runtype: %s' % run_type) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
#
# first we parse the applications registry to load all possible applications
# - for each application type we place in a dictionary key being name, value containing path to executable
#
- with open(applicationsRegistry) as data_file:
- registryData = json.load(data_file)
+ with open(applicationsRegistry) as data_file: # noqa: PTH123
+ registryData = json.load(data_file) # noqa: N806
# convert all relative paths to full paths
- A = 'Applications'
- Applications = dict()
- appList = 'Event Modeling EDP Simulation UQ'.split(' ')
- appList = [a + A for a in appList]
+ A = 'Applications' # noqa: N806
+ Applications = dict() # noqa: C408, N806
+ appList = 'Event Modeling EDP Simulation UQ'.split(' ') # noqa: N806
+ appList = [a + A for a in appList] # noqa: N806
for app_type in appList:
if app_type in registryData:
- xApplicationData = registryData[app_type]
- applicationsData = xApplicationData['Applications']
+ xApplicationData = registryData[app_type] # noqa: N806
+ applicationsData = xApplicationData['Applications'] # noqa: N806
for app in applicationsData:
- appName = app['Name']
- appExe = app['ExecutablePath']
+ appName = app['Name'] # noqa: N806
+ appExe = app['ExecutablePath'] # noqa: N806
if app_type not in Applications:
- Applications[app_type] = dict()
+ Applications[app_type] = dict() # noqa: C408
Applications[app_type][appName] = appExe
#
# open input file, and parse json into data
#
- with open(inputFile) as data_file:
+ with open(inputFile) as data_file: # noqa: PTH123
data = json.load(data_file)
# convert all relative paths to full paths
# relative2fullpath(data)
if 'runDir' in data:
- runDIR = data['runDir']
+ runDIR = data['runDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a runDir Entry')
+ raise WorkFlowInputError('Need a runDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'remoteAppDir' in data:
- remoteAppDir = data['remoteAppDir']
+ remoteAppDir = data['remoteAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a remoteAppDir Entry')
+ raise WorkFlowInputError('Need a remoteAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'localAppDir' in data:
- localAppDir = data['localAppDir']
+ localAppDir = data['localAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a localAppDir Entry')
+ raise WorkFlowInputError('Need a localAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
#
# before running chdir to templatedir
#
- workflow_log('run Directory: %s' % runDIR)
+ workflow_log('run Directory: %s' % runDIR) # noqa: F405, UP031
os.chdir(runDIR)
os.chdir('templatedir')
@@ -100,7 +100,7 @@ def main(run_type, inputFile, applicationsRegistry):
if 'Applications' in data:
available_apps = data['Applications']
else:
- raise WorkFlowInputError('Need an Applications Entry')
+ raise WorkFlowInputError('Need an Applications Entry') # noqa: EM101, F405, TRY003, TRY301
#
# get events, for each the application and its data .. FOR NOW 1 EVENT
@@ -111,42 +111,42 @@ def main(run_type, inputFile, applicationsRegistry):
for event in events:
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if eventClassification == 'Earthquake':
if 'Application' in event:
- eventApplication = event['Application']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['Application'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications['EventApplications'].get(
+ eventAppExe = Applications['EventApplications'].get( # noqa: N806
eventApplication
)
- workflow_log(remoteAppDir)
- workflow_log(eventAppExe)
- eventAppExeLocal = posixpath.join(
+ workflow_log(remoteAppDir) # noqa: F405
+ workflow_log(eventAppExe) # noqa: F405
+ eventAppExeLocal = posixpath.join( # noqa: N806
localAppDir, eventAppExe
)
- eventAppExeRemote = posixpath.join(
+ eventAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, eventAppExe
)
- workflow_log(eventAppExeRemote)
+ workflow_log(eventAppExeRemote) # noqa: F405
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
- # TODO: Fully implement HydroUQ's waterborne events into PBE
+ # TODO: Fully implement HydroUQ's waterborne events into PBE # noqa: TD002
elif (
- eventClassification == 'Tsunami'
+ eventClassification == 'Tsunami' # noqa: PLR1714
or eventClassification == 'Surge'
or eventClassification == 'StormSurge'
or eventClassification == 'Hydro'
@@ -156,175 +156,175 @@ def main(run_type, inputFile, applicationsRegistry):
)
if is_hydrouq_implemented:
if 'Application' in event:
- eventApplication = event['Application']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['Application'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806, F841
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications[
+ eventAppExe = Applications[ # noqa: N806
'EventApplications'
].get(eventApplication)
- workflow_log(remoteAppDir)
- workflow_log(eventAppExe)
- eventAppExeLocal = posixpath.join(
+ workflow_log(remoteAppDir) # noqa: F405
+ workflow_log(eventAppExe) # noqa: F405
+ eventAppExeLocal = posixpath.join( # noqa: N806
localAppDir, eventAppExe
)
- eventAppExeRemote = posixpath.join(
+ eventAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, eventAppExe
)
- workflow_log(eventAppExeRemote)
+ workflow_log(eventAppExeRemote) # noqa: F405
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'HydroUQ waterborne events are not implemented in PBE yet. Please use different workflow for now...'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+                            'HydroUQ waterborne events are not implemented in PBE yet. Please use a different workflow for now...' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'Event classification must be Earthquake, not %s'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event classification must be Earthquake, not %s' # noqa: UP031
% eventClassification
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an Events Entry in Applications')
+ raise WorkFlowInputError('Need an Events Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get modeling application and its data
#
if 'Modeling' in available_apps:
- modelingApp = available_apps['Modeling']
+ modelingApp = available_apps['Modeling'] # noqa: N806
if 'Application' in modelingApp:
- modelingApplication = modelingApp['Application']
+ modelingApplication = modelingApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- modelingAppData = modelingApp['ApplicationData']
+ modelingAppData = modelingApp['ApplicationData'] # noqa: N806
if (
- modelingApplication
+ modelingApplication # noqa: SIM118
in Applications['ModelingApplications'].keys()
):
- modelingAppExe = Applications['ModelingApplications'].get(
+ modelingAppExe = Applications['ModelingApplications'].get( # noqa: N806
modelingApplication
)
- modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe)
- modelingAppExeRemote = posixpath.join(
+ modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe) # noqa: N806
+ modelingAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, modelingAppExe
)
else:
- raise WorkFlowInputError(
- 'Modeling application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Modeling application %s not in registry' # noqa: UP031
% modelingApplication
)
else:
- raise WorkFlowInputError(
- 'Need a ModelingApplication in Modeling data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a ModelingApplication in Modeling data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Modeling Entry in Applications')
+ raise WorkFlowInputError('Need a Modeling Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get edp application and its data .. CURRENTLY MODELING APP MUST CREATE EDP
#
if 'EDP' in available_apps:
- edpApp = available_apps['EDP']
+ edpApp = available_apps['EDP'] # noqa: N806
if 'Application' in edpApp:
- edpApplication = edpApp['Application']
+ edpApplication = edpApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- edpAppData = edpApp['ApplicationData']
- if edpApplication in Applications['EDPApplications'].keys():
- edpAppExe = Applications['EDPApplications'].get(edpApplication)
- edpAppExeLocal = posixpath.join(localAppDir, edpAppExe)
- edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe)
+ edpAppData = edpApp['ApplicationData'] # noqa: N806
+ if edpApplication in Applications['EDPApplications'].keys(): # noqa: SIM118
+ edpAppExe = Applications['EDPApplications'].get(edpApplication) # noqa: N806
+ edpAppExeLocal = posixpath.join(localAppDir, edpAppExe) # noqa: N806
+ edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'EDP application {edpApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'EDP application {edpApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need an EDPApplication in EDP data')
+ raise WorkFlowInputError('Need an EDPApplication in EDP data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an EDP Entry in Applications')
+ raise WorkFlowInputError('Need an EDP Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get simulation application and its data
#
if 'Simulation' in available_apps:
- simulationApp = available_apps['Simulation']
+ simulationApp = available_apps['Simulation'] # noqa: N806
if 'Application' in simulationApp:
- simulationApplication = simulationApp['Application']
+ simulationApplication = simulationApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- simAppData = simulationApp['ApplicationData']
+ simAppData = simulationApp['ApplicationData'] # noqa: N806
if (
- simulationApplication
+ simulationApplication # noqa: SIM118
in Applications['SimulationApplications'].keys()
):
- simAppExe = Applications['SimulationApplications'].get(
+ simAppExe = Applications['SimulationApplications'].get( # noqa: N806
simulationApplication
)
- simAppExeLocal = posixpath.join(localAppDir, simAppExe)
- simAppExeRemote = posixpath.join(remoteAppDir, simAppExe)
+ simAppExeLocal = posixpath.join(localAppDir, simAppExe) # noqa: N806
+ simAppExeRemote = posixpath.join(remoteAppDir, simAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'Simulation application {simulationApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'Simulation application {simulationApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError(
- 'Need an SimulationApplication in Simulation data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+                    'Need a SimulationApplication in Simulation data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'UQ' in available_apps:
- uqApp = available_apps['UQ']
+ uqApp = available_apps['UQ'] # noqa: N806
if 'Application' in uqApp:
- uqApplication = uqApp['Application']
+ uqApplication = uqApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- uqAppData = uqApp['ApplicationData']
- if uqApplication in Applications['UQApplications'].keys():
- uqAppExe = Applications['UQApplications'].get(uqApplication)
- uqAppExeLocal = posixpath.join(localAppDir, uqAppExe)
- uqAppExeRemote = posixpath.join(localAppDir, uqAppExe)
+ uqAppData = uqApp['ApplicationData'] # noqa: N806
+ if uqApplication in Applications['UQApplications'].keys(): # noqa: SIM118
+ uqAppExe = Applications['UQApplications'].get(uqApplication) # noqa: N806
+ uqAppExeLocal = posixpath.join(localAppDir, uqAppExe) # noqa: N806
+ uqAppExeRemote = posixpath.join(localAppDir, uqAppExe) # noqa: N806, F841
else:
- raise WorkFlowInputError(
- f'UQ application {uqApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'UQ application {uqApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need a UQApplication in UQ data')
+ raise WorkFlowInputError('Need a UQApplication in UQ data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+            raise WorkFlowInputError('Need a UQ Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
- workflow_log('SUCCESS: Parsed workflow input')
- workflow_log(divider)
+ workflow_log('SUCCESS: Parsed workflow input') # noqa: F405
+ workflow_log(divider) # noqa: F405
#
# now invoke the applications
@@ -337,18 +337,18 @@ def main(run_type, inputFile, applicationsRegistry):
# - perform Simulation
# - getDL
- bimFILE = 'dakota.json'
- eventFILE = 'EVENT.json'
- samFILE = 'SAM.json'
- edpFILE = 'EDP.json'
- simFILE = 'SIM.json'
- driverFile = 'driver'
+ bimFILE = 'dakota.json' # noqa: N806
+ eventFILE = 'EVENT.json' # noqa: N806
+ samFILE = 'SAM.json' # noqa: N806
+ edpFILE = 'EDP.json' # noqa: N806
+ simFILE = 'SIM.json' # noqa: N806
+ driverFile = 'driver' # noqa: N806
# open driver file & write building app (minus the --getRV) to it
- driverFILE = open(driverFile, 'w')
+ driverFILE = open(driverFile, 'w') # noqa: SIM115, PTH123, N806
# get RV for event
- eventAppDataList = [
+ eventAppDataList = [ # noqa: N806
f'"{eventAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -358,13 +358,13 @@ def main(run_type, inputFile, applicationsRegistry):
if eventAppExe.endswith('.py'):
eventAppDataList.insert(0, 'python')
- for key in eventAppData.keys():
+ for key in eventAppData.keys(): # noqa: SIM118
eventAppDataList.append('--' + key)
value = eventAppData.get(key)
eventAppDataList.append('' + value)
for item in eventAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
eventAppDataList.append('--getRV')
@@ -373,11 +373,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
eventAppDataList[0] = '' + eventAppExeLocal
- command, result, returncode = runApplication(eventAppDataList)
+ command, result, returncode = runApplication(eventAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for building model
- modelAppDataList = [
+ modelAppDataList = [ # noqa: N806
f'"{modelingAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -390,12 +390,12 @@ def main(run_type, inputFile, applicationsRegistry):
if modelingAppExe.endswith('.py'):
modelAppDataList.insert(0, 'python')
- for key in modelingAppData.keys():
+ for key in modelingAppData.keys(): # noqa: SIM118
modelAppDataList.append('--' + key)
modelAppDataList.append('' + modelingAppData.get(key))
for item in modelAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
modelAppDataList.append('--getRV')
@@ -405,11 +405,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
modelAppDataList[0] = modelingAppExeLocal
- command, result, returncode = runApplication(modelAppDataList)
+ command, result, returncode = runApplication(modelAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for EDP!
- edpAppDataList = [
+ edpAppDataList = [ # noqa: N806
f'"{edpAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -424,12 +424,12 @@ def main(run_type, inputFile, applicationsRegistry):
if edpAppExe.endswith('.py'):
edpAppDataList.insert(0, 'python')
- for key in edpAppData.keys():
+ for key in edpAppData.keys(): # noqa: SIM118
edpAppDataList.append('--' + key)
edpAppDataList.append('' + edpAppData.get(key))
for item in edpAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
if edpAppExe.endswith('.py'):
@@ -438,11 +438,11 @@ def main(run_type, inputFile, applicationsRegistry):
edpAppDataList[0] = edpAppExeLocal
edpAppDataList.append('--getRV')
- command, result, returncode = runApplication(edpAppDataList)
+ command, result, returncode = runApplication(edpAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for Simulation
- simAppDataList = [
+ simAppDataList = [ # noqa: N806
f'"{simAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -459,12 +459,12 @@ def main(run_type, inputFile, applicationsRegistry):
if simAppExe.endswith('.py'):
simAppDataList.insert(0, 'python')
- for key in simAppData.keys():
+ for key in simAppData.keys(): # noqa: SIM118
simAppDataList.append('--' + key)
simAppDataList.append('' + simAppData.get(key))
for item in simAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
simAppDataList.append('--getRV')
@@ -473,13 +473,13 @@ def main(run_type, inputFile, applicationsRegistry):
else:
simAppDataList[0] = simAppExeLocal
- command, result, returncode = runApplication(simAppDataList)
+ command, result, returncode = runApplication(simAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# perform the simulation
driverFILE.close()
- uqAppDataList = [
+ uqAppDataList = [ # noqa: N806
f'"{uqAppExeLocal}"',
'--filenameBIM',
bimFILE,
@@ -502,7 +502,7 @@ def main(run_type, inputFile, applicationsRegistry):
uqAppDataList.append('--runType')
uqAppDataList.append(run_type)
- for key in uqAppData.keys():
+ for key in uqAppData.keys(): # noqa: SIM118
uqAppDataList.append('--' + key)
value = uqAppData.get(key)
if isinstance(value, string_types):
@@ -510,50 +510,50 @@ def main(run_type, inputFile, applicationsRegistry):
else:
uqAppDataList.append('' + str(value))
- if run_type == 'run' or run_type == 'set_up':
- workflow_log('Running Simulation...')
- workflow_log(' '.join(uqAppDataList))
- command, result, returncode = runApplication(uqAppDataList)
+ if run_type == 'run' or run_type == 'set_up': # noqa: PLR1714
+ workflow_log('Running Simulation...') # noqa: F405
+ workflow_log(' '.join(uqAppDataList)) # noqa: F405
+ command, result, returncode = runApplication(uqAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- workflow_log('Simulation ended...')
+ workflow_log('Simulation ended...') # noqa: F405
else:
- workflow_log('Setup run only. No simulation performed.')
+ workflow_log('Setup run only. No simulation performed.') # noqa: F405
- except WorkFlowInputError as e:
- print('workflow error: %s' % e.value)
- workflow_log('workflow error: %s' % e.value)
- workflow_log(divider)
- exit(1)
+ except WorkFlowInputError as e: # noqa: F405
+ print('workflow error: %s' % e.value) # noqa: T201, UP031
+ workflow_log('workflow error: %s' % e.value) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
+ exit(1) # noqa: PLR1722
# unhandled exceptions are handled here
except Exception as e:
- print('workflow error: %s' % e.value)
- workflow_log('unhandled exception... exiting')
+        print('workflow error: %s' % e) # noqa: T201, UP031
+ workflow_log('unhandled exception... exiting') # noqa: F405
raise
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
run_type = sys.argv[1]
- inputFile = sys.argv[2]
- applicationsRegistry = sys.argv[3]
+ inputFile = sys.argv[2] # noqa: N816
+ applicationsRegistry = sys.argv[3] # noqa: N816
main(run_type, inputFile, applicationsRegistry)
- workflow_log_file = 'workflow-log-%s.txt' % (
+ workflow_log_file = 'workflow-log-%s.txt' % ( # noqa: UP031
strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
)
- log_filehandle = open(workflow_log_file, 'w')
+ log_filehandle = open(workflow_log_file, 'w') # noqa: SIM115, PTH123
- print(type(log_filehandle))
+ print(type(log_filehandle)) # noqa: T201
print(divider, file=log_filehandle)
print('Start of Log', file=log_filehandle)
print(divider, file=log_filehandle)
@@ -561,13 +561,13 @@ def main(run_type, inputFile, applicationsRegistry):
# nb: log_output is a global variable, defined at the top of this script.
for result in log_output:
print(divider, file=log_filehandle)
- print('command line:\n%s\n' % result[0], file=log_filehandle)
+ print('command line:\n%s\n' % result[0], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
- print('output from process:\n%s\n' % result[1], file=log_filehandle)
+ print('output from process:\n%s\n' % result[1], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
print('End of Log', file=log_filehandle)
print(divider, file=log_filehandle)
- workflow_log('Log file: %s' % workflow_log_file)
- workflow_log('End of run.')
+ workflow_log('Log file: %s' % workflow_log_file) # noqa: F405, UP031
+ workflow_log('End of run.') # noqa: F405
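Note on the SIM115 and SIM118 suppressions in PBE.py: SIM115 flags the driver
and log files being opened without a context manager, and SIM118 flags looping
over dict.keys() where iterating the dict (or items()) suffices. The forms the
rules point toward would look like this sketch (illustrative only; the patch
keeps the explicit open/close and the .keys() loops; eventAppData,
eventAppDataList, and driverFile are the names used above):

    from pathlib import Path

    # SIM118: iterate the dict directly; items() yields key and value
    for key, value in eventAppData.items():
        eventAppDataList.append('--' + key)
        eventAppDataList.append(str(value))

    # SIM115/PTH123: scope the driver file to a with-block
    with Path(driverFile).open('w') as driver_fh:
        for item in eventAppDataList:
            driver_fh.write(f'{item} ')
        driver_fh.write('\n')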
diff --git a/modules/Workflow/PBE_workflow.py b/modules/Workflow/PBE_workflow.py
index 4291184ea..2fb84c947 100644
--- a/modules/Workflow/PBE_workflow.py
+++ b/modules/Workflow/PBE_workflow.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -46,25 +46,25 @@
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(run_type, input_file, app_registry):
+def main(run_type, input_file, app_registry): # noqa: D103
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
whale.log_file = runDir + '/log.txt'
- with open(whale.log_file, 'w') as f:
+ with open(whale.log_file, 'w') as f: # noqa: PTH123
f.write('PBE workflow\n')
# echo the inputs
@@ -81,10 +81,10 @@ def main(run_type, input_file, app_registry):
is not None
):
run_type = 'loss_only'
- except:
+ except: # noqa: S110, E722
pass
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -112,13 +112,13 @@ def main(run_type, input_file, app_registry):
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
main(run_type=sys.argv[1], input_file=sys.argv[2], app_registry=sys.argv[3])
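Note on the S110/E722 suppression in PBE_workflow.py: it covers a bare
'except: pass' around the probe that switches run_type to 'loss_only'. What
those rules normally ask for is a named exception tuple and a comment on why
swallowing it is safe, roughly as in this sketch (detect_loss_only is a
hypothetical stand-in for the nested lookup in the hunk above, and the caught
exception types are assumptions about its failure modes):

    try:
        if detect_loss_only(inputs):  # hypothetical helper, see note above
            run_type = 'loss_only'
    except (KeyError, TypeError):  # assumed failure modes of the real lookup
        pass  # fall back to the caller-supplied run_type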
diff --git a/modules/Workflow/R2DTool_workflow.py b/modules/Workflow/R2DTool_workflow.py
index 2a22f36f3..acb30f815 100644
--- a/modules/Workflow/R2DTool_workflow.py
+++ b/modules/Workflow/R2DTool_workflow.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -46,13 +46,13 @@
import sys
from pathlib import Path
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(
+def main( # noqa: D103
run_type,
input_file,
app_registry,
@@ -64,20 +64,20 @@ def main(
log_file,
):
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
if working_dir is not None:
- runDir = working_dir
+ runDir = working_dir # noqa: N806
else:
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
- if not os.path.exists(runDir):
- os.mkdir(runDir)
+ if not os.path.exists(runDir): # noqa: PTH110
+ os.mkdir(runDir) # noqa: PTH102
if log_file == 'log.txt':
whale.log_file = runDir + '/log.txt'
else:
whale.log_file = log_file
- with open(whale.log_file, 'w') as f:
+ with open(whale.log_file, 'w') as f: # noqa: PTH123
f.write('RDT workflow\n')
whale.print_system_info()
@@ -89,7 +89,7 @@ def main(
if force_cleanup:
log_msg('Forced cleanup turned on.')
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -112,7 +112,7 @@ def main(
)
if bldg_id_filter is not None:
- print(bldg_id_filter)
+ print(bldg_id_filter) # noqa: T201
log_msg(f'Overriding simulation scope; running buildings {bldg_id_filter}')
# If a Min or Max attribute is used when calling the script, we need to
@@ -126,8 +126,8 @@ def main(
building_file = WF.create_building_files()
WF.perform_regional_mapping(building_file)
- # TODO: not elegant code, fix later
- with open(WF.building_file_path) as f:
+ # TODO: not elegant code, fix later # noqa: TD002
+ with open(WF.building_file_path) as f: # noqa: PTH123
bldg_data = json.load(f)
for bldg in bldg_data: # [:1]:
@@ -170,7 +170,7 @@ def main(
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter workflow for a set of assets.', allow_abbrev=False
)
@@ -190,8 +190,8 @@ def main(
workflowArgParser.add_argument(
'-r',
'--registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -205,13 +205,13 @@ def main(
workflowArgParser.add_argument(
'-d',
'--referenceDir',
- default=os.path.join(os.getcwd(), 'input_data'),
+ default=os.path.join(os.getcwd(), 'input_data'), # noqa: PTH109, PTH118
help='Relative paths in the config file are referenced to this directory.',
)
workflowArgParser.add_argument(
'-w',
'--workDir',
- default=os.path.join(os.getcwd(), 'results'),
+ default=os.path.join(os.getcwd(), 'results'), # noqa: PTH109, PTH118
help='Absolute path to the working directory.',
)
workflowArgParser.add_argument(
@@ -228,11 +228,11 @@ def main(
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
if wfArgs.check:
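Note on the PTH suppressions in R2DTool_workflow.py: PTH109/PTH118 flag the
os.getcwd-based argparse defaults, and PTH110/PTH102 flag the exists/mkdir
pair for the run directory. The pathlib forms those rules point toward would
look roughly like this (a sketch only; the patch keeps the os calls):

    from pathlib import Path

    # argparse defaults (--referenceDir, --workDir)
    reference_dir_default = Path.cwd() / 'input_data'
    work_dir_default = Path.cwd() / 'results'

    # run directory creation: replaces os.path.exists + os.mkdir
    Path(runDir).mkdir(exist_ok=True)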
diff --git a/modules/Workflow/RegionalEarthquakeSimulation.py b/modules/Workflow/RegionalEarthquakeSimulation.py
index 00339f9e7..55667496b 100755
--- a/modules/Workflow/RegionalEarthquakeSimulation.py
+++ b/modules/Workflow/RegionalEarthquakeSimulation.py
@@ -1,4 +1,4 @@
-# written: fmk
+# written: fmk # noqa: EXE002, INP001, D100
import json
import os
@@ -8,69 +8,69 @@
divider = '#' * 80
log_output = []
-from WorkflowUtils import *
+from WorkflowUtils import * # noqa: E402, F403
-def main(run_type, inputFile, applicationsRegistry):
+def main(run_type, inputFile, applicationsRegistry): # noqa: C901, N803, D103, PLR0912, PLR0915
# the whole workflow is wrapped within a 'try' block.
# a number of exceptions (files missing, explicit application failures, etc.) are
# handled explicitly to aid the user.
    # But unhandled exceptions cause the workflow to stop with an error, handled in the
    # exception block at the very bottom of this main() function
try:
- workflow_log(divider)
- workflow_log('Start of run')
- workflow_log(divider)
- workflow_log('workflow input file: %s' % inputFile)
- workflow_log('application registry file: %s' % applicationsRegistry)
- workflow_log('runtype: %s' % run_type)
- workflow_log(divider)
+ workflow_log(divider) # noqa: F405
+ workflow_log('Start of run') # noqa: F405
+ workflow_log(divider) # noqa: F405
+ workflow_log('workflow input file: %s' % inputFile) # noqa: F405, UP031
+ workflow_log('application registry file: %s' % applicationsRegistry) # noqa: F405, UP031
+ workflow_log('runtype: %s' % run_type) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
#
# first we parse the applications registry to load all possible applications
# - for each application type we place in a dictionary key being name, value containing path to executable
#
- with open(applicationsRegistry) as data_file:
- registryData = json.load(data_file)
+ with open(applicationsRegistry) as data_file: # noqa: PTH123
+ registryData = json.load(data_file) # noqa: N806
# convert all relative paths to full paths
- relative2fullpath(registryData)
+ relative2fullpath(registryData) # noqa: F405
- A = 'Applications'
- Applications = dict()
- appList = 'Building Event Modeling EDP Simulation UQ DamageAndLoss'.split(
+ A = 'Applications' # noqa: N806
+ Applications = dict() # noqa: C408, N806
+ appList = 'Building Event Modeling EDP Simulation UQ DamageAndLoss'.split( # noqa: N806
' '
)
- appList = [a + A for a in appList]
+ appList = [a + A for a in appList] # noqa: N806
for app_type in appList:
if app_type in registryData:
- xApplicationData = registryData[app_type]
- applicationsData = xApplicationData['Applications']
+ xApplicationData = registryData[app_type] # noqa: N806
+ applicationsData = xApplicationData['Applications'] # noqa: N806
for app in applicationsData:
- appName = app['Name']
- appExe = app['ExecutablePath']
+ appName = app['Name'] # noqa: N806
+ appExe = app['ExecutablePath'] # noqa: N806
if app_type not in Applications:
- Applications[app_type] = dict()
+ Applications[app_type] = dict() # noqa: C408
Applications[app_type][appName] = appExe
#
# open input file, and parse json into data
#
- with open(inputFile) as data_file:
+ with open(inputFile) as data_file: # noqa: PTH123
data = json.load(data_file)
# convert all relative paths to full paths
- relative2fullpath(data)
+ relative2fullpath(data) # noqa: F405
#
# get all application data, quit if error
#
if 'WorkflowType' in data:
- typeWorkflow = data['WorkflowType']
+ typeWorkflow = data['WorkflowType'] # noqa: N806, F841
else:
- raise WorkFlowInputError('Need a Workflow Type')
+ raise WorkFlowInputError('Need a Workflow Type') # noqa: EM101, F405, TRY003, TRY301
# check correct workflow type
@@ -81,40 +81,40 @@ def main(run_type, inputFile, applicationsRegistry):
if 'Applications' in data:
available_apps = data['Applications']
else:
- raise WorkFlowInputError('Need an Applications Entry')
+ raise WorkFlowInputError('Need an Applications Entry') # noqa: EM101, F405, TRY003, TRY301
#
# get building application and its data
#
if 'Buildings' in available_apps:
- buildingApp = available_apps['Buildings']
+ buildingApp = available_apps['Buildings'] # noqa: N806
if 'BuildingApplication' in buildingApp:
- buildingApplication = buildingApp['BuildingApplication']
+ buildingApplication = buildingApp['BuildingApplication'] # noqa: N806
# check building app in registry, if so get full executable path
- buildingAppData = buildingApp['ApplicationData']
+ buildingAppData = buildingApp['ApplicationData'] # noqa: N806
if (
- buildingApplication
+ buildingApplication # noqa: SIM118
in Applications['BuildingApplications'].keys()
):
- buildingAppExe = Applications['BuildingApplications'].get(
+ buildingAppExe = Applications['BuildingApplications'].get( # noqa: N806
buildingApplication
)
else:
- raise WorkFlowInputError(
- 'Building application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Building application %s not in registry' # noqa: UP031
% buildingApplication
)
else:
- raise WorkFlowInputError(
- 'Need a Building Generator Application in Buildings'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a Building Generator Application in Buildings' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Buildings Entry in Applications')
+ raise WorkFlowInputError('Need a Buildings Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get events, for each the application and its data .. FOR NOW 1 EVENT
@@ -125,179 +125,179 @@ def main(run_type, inputFile, applicationsRegistry):
for event in events:
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if eventClassification == 'Earthquake':
if 'EventApplication' in event:
- eventApplication = event['EventApplication']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['EventApplication'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806, F841
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications['EventApplications'].get(
+ eventAppExe = Applications['EventApplications'].get( # noqa: N806
eventApplication
)
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'Event classification must be Earthquake, not %s'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event classification must be Earthquake, not %s' # noqa: UP031
% eventClassification
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an Events Entry in Applications')
+ raise WorkFlowInputError('Need an Events Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get modeling application and its data
#
if 'Modeling' in available_apps:
- modelingApp = available_apps['Modeling']
+ modelingApp = available_apps['Modeling'] # noqa: N806
if 'ModelingApplication' in modelingApp:
- modelingApplication = modelingApp['ModelingApplication']
+ modelingApplication = modelingApp['ModelingApplication'] # noqa: N806
# check modeling app in registry, if so get full executable path
- modelingAppData = modelingApp['ApplicationData']
+ modelingAppData = modelingApp['ApplicationData'] # noqa: N806
if (
- modelingApplication
+ modelingApplication # noqa: SIM118
in Applications['ModelingApplications'].keys()
):
- modelingAppExe = Applications['ModelingApplications'].get(
+ modelingAppExe = Applications['ModelingApplications'].get( # noqa: N806
modelingApplication
)
else:
- raise WorkFlowInputError(
- 'Modeling application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Modeling application %s not in registry' # noqa: UP031
% modelingApplication
)
else:
- raise WorkFlowInputError(
- 'Need a ModelingApplication in Modeling data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a ModelingApplication in Modeling data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Modeling Entry in Applications')
+ raise WorkFlowInputError('Need a Modeling Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get edp application and its data
#
if 'EDP' in available_apps:
- edpApp = available_apps['EDP']
+ edpApp = available_apps['EDP'] # noqa: N806
if 'EDPApplication' in edpApp:
- edpApplication = edpApp['EDPApplication']
+ edpApplication = edpApp['EDPApplication'] # noqa: N806
# check modeling app in registry, if so get full executable path
- edpAppData = edpApp['ApplicationData']
- if edpApplication in Applications['EDPApplications'].keys():
- edpAppExe = Applications['EDPApplications'].get(edpApplication)
+ edpAppData = edpApp['ApplicationData'] # noqa: N806
+ if edpApplication in Applications['EDPApplications'].keys(): # noqa: SIM118
+ edpAppExe = Applications['EDPApplications'].get(edpApplication) # noqa: N806
else:
- raise WorkFlowInputError(
- 'EDP application %s not in registry',
- edpApplication,
- )
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'EDP application %s not in registry' # noqa: UP031
+ % edpApplication
+ )
else:
- raise WorkFlowInputError('Need an EDPApplication in EDP data')
+ raise WorkFlowInputError('Need an EDPApplication in EDP data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an EDP Entry in Applications')
+ raise WorkFlowInputError('Need an EDP Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'Simulation' in available_apps:
- simulationApp = available_apps['Simulation']
+ simulationApp = available_apps['Simulation'] # noqa: N806
if 'SimulationApplication' in simulationApp:
- simulationApplication = simulationApp['SimulationApplication']
+ simulationApplication = simulationApp['SimulationApplication'] # noqa: N806
# check modeling app in registry, if so get full executable path
- simAppData = simulationApp['ApplicationData']
+ simAppData = simulationApp['ApplicationData'] # noqa: N806
if (
- simulationApplication
+ simulationApplication # noqa: SIM118
in Applications['SimulationApplications'].keys()
):
- simAppExe = Applications['SimulationApplications'].get(
+ simAppExe = Applications['SimulationApplications'].get( # noqa: N806
simulationApplication
)
else:
- raise WorkFlowInputError(
- 'Simulation application %s not in registry',
- simulationApplication,
- )
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Simulation application %s not in registry' # noqa: UP031
+ % simulationApplication
+ )
else:
- raise WorkFlowInputError(
- 'Need an SimulationApplication in Simulation data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a SimulationApplication in Simulation data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'UQ-Simulation' in available_apps:
- uqApp = available_apps['UQ-Simulation']
+ uqApp = available_apps['UQ-Simulation'] # noqa: N806
if 'UQApplication' in uqApp:
- uqApplication = uqApp['UQApplication']
+ uqApplication = uqApp['UQApplication'] # noqa: N806
# check modeling app in registry, if so get full executable path
- uqAppData = uqApp['ApplicationData']
- if uqApplication in Applications['UQApplications'].keys():
- uqAppExe = Applications['UQApplications'].get(uqApplication)
+ uqAppData = uqApp['ApplicationData'] # noqa: N806
+ if uqApplication in Applications['UQApplications'].keys(): # noqa: SIM118
+ uqAppExe = Applications['UQApplications'].get(uqApplication) # noqa: N806
else:
- raise WorkFlowInputError(
- 'UQ application %s not in registry',
- uqApplication,
- )
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'UQ application %s not in registry' # noqa: UP031
+ % uqApplication
+ )
else:
- raise WorkFlowInputError('Need a UQApplication in UQ data')
+ raise WorkFlowInputError('Need a UQApplication in UQ data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a UQ-Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'Damage&Loss' in available_apps:
- DLApp = available_apps['Damage&Loss']
+ DLApp = available_apps['Damage&Loss'] # noqa: N806
if 'Damage&LossApplication' in DLApp:
- dlApplication = DLApp['Damage&LossApplication']
+ dlApplication = DLApp['Damage&LossApplication'] # noqa: N806
# check modeling app in registry, if so get full executable path
- dlAppData = DLApp['ApplicationData']
- if dlApplication in Applications['DamageAndLossApplications'].keys():
- dlAppExe = Applications['DamageAndLossApplications'].get(
+ dlAppData = DLApp['ApplicationData'] # noqa: N806
+ if dlApplication in Applications['DamageAndLossApplications'].keys(): # noqa: SIM118
+ dlAppExe = Applications['DamageAndLossApplications'].get( # noqa: N806
dlApplication
)
else:
- raise WorkFlowInputError(
- 'Dmage & Loss application %s not in registry' % dlApplication
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Damage & Loss application %s not in registry' % dlApplication # noqa: UP031
)
else:
- raise WorkFlowInputError(
- 'Need a Damage&LossApplicationApplication in Damage & Loss data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a Damage&LossApplication in Damage & Loss data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Damage&Loss Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
- workflow_log('SUCCESS: Parsed workflow input')
- workflow_log(divider)
+ workflow_log('SUCCESS: Parsed workflow input') # noqa: F405
+ workflow_log(divider) # noqa: F405
#
# now invoke the applications
@@ -307,17 +307,17 @@ def main(run_type, inputFile, applicationsRegistry):
# put building generator application data into list and exe
#
- buildingsFile = 'buildings.json'
- buildingAppDataList = [buildingAppExe, buildingsFile]
+ buildingsFile = 'buildings.json' # noqa: N806
+ buildingAppDataList = [buildingAppExe, buildingsFile] # noqa: N806
- for key in buildingAppData.keys():
+ for key in buildingAppData.keys(): # noqa: SIM118
buildingAppDataList.append('-' + key.encode('ascii', 'ignore'))
buildingAppDataList.append(
buildingAppData.get(key).encode('ascii', 'ignore')
)
buildingAppDataList.append('--getRV')
- command, result, returncode = runApplication(buildingAppDataList)
+ command, result, returncode = runApplication(buildingAppDataList) # noqa: F405
log_output.append([command, result, returncode])
del buildingAppDataList[-1]
@@ -331,27 +331,27 @@ def main(run_type, inputFile, applicationsRegistry):
# - perform Simulation
# - getDL
- with open(buildingsFile) as data_file:
+ with open(buildingsFile) as data_file: # noqa: PTH123
data = json.load(data_file)
for building in data:
- id = building['id']
- bimFILE = building['file']
- eventFILE = id + '-EVENT.json'
- samFILE = id + '-SAM.json'
- edpFILE = id + '-EDP.json'
- dlFILE = id + '-DL.json'
- simFILE = id + '-SIM.json'
- driverFile = id + '-driver'
+ id = building['id'] # noqa: A001
+ bimFILE = building['file'] # noqa: N806
+ eventFILE = id + '-EVENT.json' # noqa: N806
+ samFILE = id + '-SAM.json' # noqa: N806
+ edpFILE = id + '-EDP.json' # noqa: N806
+ dlFILE = id + '-DL.json' # noqa: N806
+ simFILE = id + '-SIM.json' # noqa: N806
+ driverFile = id + '-driver' # noqa: N806
# open driver file & write building app (minus the --getRV) to it
- driverFILE = open(driverFile, 'w')
+ driverFILE = open(driverFile, 'w') # noqa: SIM115, PTH123, N806
for item in buildingAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
# get RV for event
- eventAppDataList = [
+ eventAppDataList = [ # noqa: N806
eventAppExe,
'--filenameAIM',
bimFILE,
@@ -361,23 +361,23 @@ def main(run_type, inputFile, applicationsRegistry):
if eventAppExe.endswith('.py'):
eventAppDataList.insert(0, 'python')
- for key in eventAppData.keys():
+ for key in eventAppData.keys(): # noqa: SIM118
eventAppDataList.append('-' + key.encode('ascii', 'ignore'))
value = eventAppData.get(key)
- if os.path.exists(value) and not os.path.isabs(value):
- value = os.path.abspath(value)
+ if os.path.exists(value) and not os.path.isabs(value): # noqa: PTH110, PTH117
+ value = os.path.abspath(value) # noqa: PTH100
eventAppDataList.append(value.encode('ascii', 'ignore'))
for item in eventAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
eventAppDataList.append('--getRV')
- command, result, returncode = runApplication(eventAppDataList)
+ command, result, returncode = runApplication(eventAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for building model
- modelAppDataList = [
+ modelAppDataList = [ # noqa: N806
modelingAppExe,
'--filenameAIM',
bimFILE,
@@ -387,22 +387,22 @@ def main(run_type, inputFile, applicationsRegistry):
samFILE,
]
- for key in modelingAppData.keys():
+ for key in modelingAppData.keys(): # noqa: SIM118
modelAppDataList.append('-' + key.encode('ascii', 'ignore'))
modelAppDataList.append(
modelingAppData.get(key).encode('ascii', 'ignore')
)
for item in modelAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
modelAppDataList.append('--getRV')
- command, result, returncode = runApplication(modelAppDataList)
+ command, result, returncode = runApplication(modelAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for EDP!
- edpAppDataList = [
+ edpAppDataList = [ # noqa: N806
edpAppExe,
'--filenameAIM',
bimFILE,
@@ -414,20 +414,20 @@ def main(run_type, inputFile, applicationsRegistry):
edpFILE,
]
- for key in edpAppData.keys():
+ for key in edpAppData.keys(): # noqa: SIM118
edpAppDataList.append('-' + key.encode('ascii', 'ignore'))
edpAppDataList.append(edpAppData.get(key).encode('ascii', 'ignore'))
for item in edpAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
edpAppDataList.append('--getRV')
- command, result, returncode = runApplication(edpAppDataList)
+ command, result, returncode = runApplication(edpAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for Simulation
- simAppDataList = [
+ simAppDataList = [ # noqa: N806
simAppExe,
'--filenameAIM',
bimFILE,
@@ -441,20 +441,20 @@ def main(run_type, inputFile, applicationsRegistry):
simFILE,
]
- for key in simAppData.keys():
+ for key in simAppData.keys(): # noqa: SIM118
simAppDataList.append('-' + key.encode('ascii', 'ignore'))
simAppDataList.append(simAppData.get(key).encode('ascii', 'ignore'))
for item in simAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
simAppDataList.append('--getRV')
- command, result, returncode = runApplication(simAppDataList)
+ command, result, returncode = runApplication(simAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# Adding CreateLoss to Dakota Driver
- dlAppDataList = [
+ dlAppDataList = [ # noqa: N806
dlAppExe,
'--filenameAIM',
bimFILE,
@@ -464,17 +464,17 @@ def main(run_type, inputFile, applicationsRegistry):
dlFILE,
]
- for key in dlAppData.keys():
+ for key in dlAppData.keys(): # noqa: SIM118
dlAppDataList.append('-' + key.encode('ascii', 'ignore'))
dlAppDataList.append(dlAppData.get(key).encode('ascii', 'ignore'))
for item in dlAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
# perform the simulation
driverFILE.close()
- uqAppDataList = [
+ uqAppDataList = [ # noqa: N806
uqAppExe,
'--filenameAIM',
bimFILE,
@@ -492,66 +492,66 @@ def main(run_type, inputFile, applicationsRegistry):
driverFile,
]
- for key in uqAppData.keys():
+ for key in uqAppData.keys(): # noqa: SIM118
uqAppDataList.append('-' + key.encode('ascii', 'ignore'))
- uqAppDataList.append(simAppData.get(key).encode('ascii', 'ignore'))
+ uqAppDataList.append(uqAppData.get(key).encode('ascii', 'ignore'))
if run_type == 'run':
- workflow_log('Running Simulation...')
- workflow_log(' '.join(uqAppDataList))
- command, result, returncode = runApplication(uqAppDataList)
+ workflow_log('Running Simulation...') # noqa: F405
+ workflow_log(' '.join(uqAppDataList)) # noqa: F405
+ command, result, returncode = runApplication(uqAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- workflow_log('Simulation ended...')
+ workflow_log('Simulation ended...') # noqa: F405
else:
- workflow_log('Check run only. No simulation performed.')
+ workflow_log('Check run only. No simulation performed.') # noqa: F405
- except WorkFlowInputError as e:
- workflow_log('workflow error: %s' % e.value)
- workflow_log(divider)
- exit(1)
+ except WorkFlowInputError as e: # noqa: F405
+ workflow_log('workflow error: %s' % e.value) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
+ exit(1) # noqa: PLR1722
# unhandled exceptions are handled here
except:
raise
- workflow_log('unhandled exception... exiting')
- exit(1)
+ workflow_log('unhandled exception... exiting') # noqa: F405
+ exit(1) # noqa: PLR1722
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
run_type = sys.argv[1]
- inputFile = sys.argv[2]
- applicationsRegistry = sys.argv[3]
+ inputFile = sys.argv[2] # noqa: N816
+ applicationsRegistry = sys.argv[3] # noqa: N816
main(run_type, inputFile, applicationsRegistry)
- workflow_log_file = 'workflow-log-%s.txt' % (
+ workflow_log_file = 'workflow-log-%s.txt' % ( # noqa: UP031
strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
)
- log_filehandle = open(workflow_log_file, 'wb')
+ log_filehandle = open(workflow_log_file, 'wb') # noqa: SIM115, PTH123
- print >> log_filehandle, divider
- print >> log_filehandle, 'Start of Log'
- print >> log_filehandle, divider
- print >> log_filehandle, workflow_log_file
+ print >> log_filehandle, divider # noqa: F633
+ print >> log_filehandle, 'Start of Log' # noqa: F633
+ print >> log_filehandle, divider # noqa: F633
+ print >> log_filehandle, workflow_log_file # noqa: F633
# nb: log_output is a global variable, defined at the top of this script.
for result in log_output:
- print >> log_filehandle, divider
- print >> log_filehandle, 'command line:\n%s\n' % result[0]
- print >> log_filehandle, divider
- print >> log_filehandle, 'output from process:\n%s\n' % result[1]
+ print >> log_filehandle, divider # noqa: F633
+ print >> log_filehandle, 'command line:\n%s\n' % result[0] # noqa: F633, UP031
+ print >> log_filehandle, divider # noqa: F633
+ print >> log_filehandle, 'output from process:\n%s\n' % result[1] # noqa: F633, UP031
- print >> log_filehandle, divider
- print >> log_filehandle, 'End of Log'
- print >> log_filehandle, divider
+ print >> log_filehandle, divider # noqa: F633
+ print >> log_filehandle, 'End of Log' # noqa: F633
+ print >> log_filehandle, divider # noqa: F633
- workflow_log('Log file: %s' % workflow_log_file)
- workflow_log('End of run.')
+ workflow_log('Log file: %s' % workflow_log_file) # noqa: F405, UP031
+ workflow_log('End of run.') # noqa: F405
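Most of the UP031 suppressions above mark printf-style % formatting. A minimal sketch of the f-string rewrite that would remove the need for the noqa, using the log-file-name pattern from this script (hypothetical, not part of the patch):

# Hypothetical sketch: the printf-style line ruff flags as UP031 and the
# f-string form that would silence it without a suppression.
from time import gmtime, strftime

stamp = strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
old_style = 'workflow-log-%s.txt' % stamp  # UP031: printf-style formatting
new_style = f'workflow-log-{stamp}.txt'    # modern equivalent
assert old_style == new_style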
diff --git a/modules/Workflow/SiteResponse_workflow.py b/modules/Workflow/SiteResponse_workflow.py
index a83bae801..6a7da92b2 100644
--- a/modules/Workflow/SiteResponse_workflow.py
+++ b/modules/Workflow/SiteResponse_workflow.py
@@ -1,4 +1,4 @@
-# Site response workflow
+# Site response workflow # noqa: INP001, D100
import argparse
import json
@@ -7,13 +7,13 @@
from glob import glob
from pathlib import Path
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(
+def main( # noqa: D103
run_type,
input_file,
app_registry,
@@ -25,20 +25,20 @@ def main(
log_file,
):
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
if working_dir is not None:
- runDir = working_dir
+ runDir = working_dir # noqa: N806
else:
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
- if not os.path.exists(runDir):
- os.mkdir(runDir)
+ if not os.path.exists(runDir): # noqa: PTH110
+ os.mkdir(runDir) # noqa: PTH102
if log_file == 'log.txt':
whale.log_file = runDir + '/log.txt'
else:
whale.log_file = log_file
- with open(whale.log_file, 'w') as f:
+ with open(whale.log_file, 'w') as f: # noqa: PTH123
f.write('RDT workflow\n')
whale.print_system_info()
@@ -50,7 +50,7 @@ def main(
if force_cleanup:
log_msg('Forced cleanup turned on.')
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -73,7 +73,7 @@ def main(
)
if bldg_id_filter is not None:
- print(bldg_id_filter)
+ print(bldg_id_filter) # noqa: T201
log_msg(f'Overriding simulation scope; running buildings {bldg_id_filter}')
# If a Min or Max attribute is used when calling the script, we need to
@@ -87,8 +87,8 @@ def main(
building_file = WF.create_building_files()
WF.perform_regional_mapping(building_file)
- # TODO: not elegant code, fix later
- with open(WF.building_file_path) as f:
+ # TODO: not elegant code, fix later # noqa: TD002
+ with open(WF.building_file_path) as f: # noqa: PTH123
bldg_data = json.load(f)
for bldg in bldg_data: # [:1]:
@@ -127,12 +127,12 @@ def main(
# clean up intermediate files from the working directory
WF.cleanup_workdir()
- surfaceMoDir = collect_surface_motion(WF.run_dir, bldg_data)
+ surfaceMoDir = collect_surface_motion(WF.run_dir, bldg_data) # noqa: N806, F841
-def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
+def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''): # noqa: N803, D103
if surfaceMoDir == '':
- surfaceMoDir = f'{runDir}/surface_motions/'
+ surfaceMoDir = f'{runDir}/surface_motions/' # noqa: N806
for bldg in bldg_data: # [:1]:
log_msg(bldg)
@@ -140,20 +140,20 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
bldg_id = bldg['id']
if bldg_id is not None:
- mPaths = glob(f'{runDir}/{bldg_id}/workdir.*/EVENT.json')
+ mPaths = glob(f'{runDir}/{bldg_id}/workdir.*/EVENT.json') # noqa: PTH207, N806
- surfMoTmpDir = f'{surfaceMoDir}/{bldg_id}/'
+ surfMoTmpDir = f'{surfaceMoDir}/{bldg_id}/' # noqa: N806
- if not os.path.exists(surfMoTmpDir):
- os.makedirs(surfMoTmpDir)
+ if not os.path.exists(surfMoTmpDir): # noqa: PTH110
+ os.makedirs(surfMoTmpDir) # noqa: PTH103
for p in mPaths:
- simID = p.split('/')[-2].split('.')[-1]
+ simID = p.split('/')[-2].split('.')[-1] # noqa: N806, F841
# shutil.copyfile(p, f"{surfMoTmpDir}/EVENT-{simID}.json")
- newEVENT = {}
+ newEVENT = {} # noqa: N806
# load the event file
- with open(p) as f:
- EVENT_in_All = json.load(f)
+ with open(p) as f: # noqa: PTH123
+ EVENT_in_All = json.load(f) # noqa: N806
newEVENT['name'] = EVENT_in_All['Events'][0]['event_id'].replace(
'x', '-'
@@ -174,7 +174,7 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
]['data']
newEVENT['PGA_y'] = max(newEVENT['data_y'])
- with open(
+ with open( # noqa: PTH123
f"{surfMoTmpDir}/EVENT-{newEVENT['name']}.json", 'w'
) as outfile:
json.dump(newEVENT, outfile)
@@ -185,7 +185,7 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter workflow for a set of assets.', allow_abbrev=False
)
@@ -205,8 +205,8 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
workflowArgParser.add_argument(
'-r',
'--registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -220,13 +220,13 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
workflowArgParser.add_argument(
'-d',
'--referenceDir',
- default=os.path.join(os.getcwd(), 'input_data'),
+ default=os.path.join(os.getcwd(), 'input_data'), # noqa: PTH109, PTH118
help='Relative paths in the config file are referenced to this directory.',
)
workflowArgParser.add_argument(
'-w',
'--workDir',
- default=os.path.join(os.getcwd(), 'results'),
+ default=os.path.join(os.getcwd(), 'results'), # noqa: PTH109, PTH118
help='Absolute path to the working directory.',
)
workflowArgParser.add_argument(
@@ -243,11 +243,11 @@ def collect_surface_motion(runDir, bldg_data, surfaceMoDir=''):
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
if wfArgs.check:
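The PTH suppressions in this file keep the existing os.path calls rather than migrating to pathlib. A hedged sketch of the pathlib equivalent of the --registry default above; the two forms are interchangeable except that Path.resolve() also resolves symlinks while os.path.abspath does not:

import os
from pathlib import Path

# os.path form kept by the patch (PTH118, PTH120, PTH100 suppressed):
registry_os = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),
    'WorkflowApplications.json',
)
# pathlib form the rules point toward:
registry_pl = Path(__file__).resolve().parent / 'WorkflowApplications.json'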
diff --git a/modules/Workflow/WorkflowUtils.py b/modules/Workflow/WorkflowUtils.py
index a3222051c..86eea45be 100644
--- a/modules/Workflow/WorkflowUtils.py
+++ b/modules/Workflow/WorkflowUtils.py
@@ -1,11 +1,11 @@
-# written: fmk, adamzs
+# written: fmk, adamzs # noqa: INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -14,27 +14,27 @@
from time import gmtime, strftime
-class WorkFlowInputError(Exception):
+class WorkFlowInputError(Exception): # noqa: D101
def __init__(self, value):
self.value = value
- def __str__(self):
+ def __str__(self): # noqa: D105
return repr(self.value)
try:
- basestring
+ basestring # noqa: B018
except NameError:
basestring = str
-def workflow_log(msg):
+def workflow_log(msg): # noqa: D103
# ISO-8601 format, e.g. 2018-06-16T20:24:04Z
- print('%s %s' % (strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg))
+ print('%s %s' % (strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)) # noqa: T201, UP031
# function to return result of invoking an application
-def runApplication(application_plus_args):
+def runApplication(application_plus_args): # noqa: N802, D103
if application_plus_args[0] == 'python':
command = f'python "{application_plus_args[1]}" ' + ' '.join(
application_plus_args[2:]
@@ -45,7 +45,7 @@ def runApplication(application_plus_args):
)
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
command, stderr=subprocess.STDOUT, shell=True
)
# for line in result.split('\n'):
@@ -57,23 +57,23 @@ def runApplication(application_plus_args):
returncode = e.returncode
if returncode != 0:
- workflow_log('NON-ZERO RETURN CODE: %s' % returncode)
+ workflow_log('NON-ZERO RETURN CODE: %s' % returncode) # noqa: UP031
return command, result, returncode
-def add_full_path(possible_filename):
+def add_full_path(possible_filename): # noqa: D103
if not isinstance(possible_filename, basestring):
return possible_filename
- if os.path.exists(possible_filename):
- if os.path.isdir(possible_filename):
- return os.path.abspath(possible_filename) + '/'
- else:
- return os.path.abspath(possible_filename)
+ if os.path.exists(possible_filename): # noqa: PTH110
+ if os.path.isdir(possible_filename): # noqa: PTH112
+ return os.path.abspath(possible_filename) + '/' # noqa: PTH100
+ else: # noqa: RET505
+ return os.path.abspath(possible_filename) # noqa: PTH100
else:
return possible_filename
-def recursive_iter(obj):
+def recursive_iter(obj): # noqa: D103
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, basestring):
@@ -88,5 +88,5 @@ def recursive_iter(obj):
recursive_iter(item)
-def relative2fullpath(json_object):
+def relative2fullpath(json_object): # noqa: D103
recursive_iter(json_object)
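The S602 suppression in runApplication acknowledges a shell=True call on a single command string. A hypothetical shell-free variant, passing the argument list directly so no suppression is needed; this sketches the alternative, not what the patch does:

import subprocess

def run_application_no_shell(application_plus_args):
    # application_plus_args is the same executable-plus-arguments list the
    # workflow builds; passing it as a list avoids shell interpretation.
    try:
        result = subprocess.check_output(
            application_plus_args,
            stderr=subprocess.STDOUT,
        )
        returncode = 0
    except subprocess.CalledProcessError as e:
        result = e.output
        returncode = e.returncode
    return ' '.join(application_plus_args), result, returncode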
diff --git a/modules/Workflow/changeJSON.py b/modules/Workflow/changeJSON.py
index c97845545..81555601e 100755
--- a/modules/Workflow/changeJSON.py
+++ b/modules/Workflow/changeJSON.py
@@ -1,18 +1,18 @@
-import json
+import json # noqa: EXE002, INP001, D100
import sys
-def main(inputFile, outputFile):
- extraArgs = sys.argv[3:]
+def main(inputFile, outputFile): # noqa: N803, D103
+ extraArgs = sys.argv[3:] # noqa: N806
# initialize the log file
- with open(inputFile) as f:
+ with open(inputFile) as f: # noqa: PTH123
data = json.load(f)
for k, val in zip(extraArgs[0::2], extraArgs[1::2]):
data[k] = val
- with open(outputFile, 'w') as outfile:
+ with open(outputFile, 'w') as outfile: # noqa: PTH123
json.dump(data, outfile)
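changeJSON.py pairs the trailing command-line arguments into key/value overrides via extended slices. A small demonstration of that pairing, with made-up values:

# Hypothetical values; in the script extra_args comes from sys.argv[3:].
extra_args = ['height', '10', 'stories', '3']
overrides = dict(zip(extra_args[0::2], extra_args[1::2]))
# overrides == {'height': '10', 'stories': '3'} -- every value stays a string,
# so numeric JSON fields are overwritten with strings.

The script applies the same zip directly to the loaded JSON dict, one key at a time.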
diff --git a/modules/Workflow/computeResponseSpectrum.py b/modules/Workflow/computeResponseSpectrum.py
index e24faa45c..d17b52882 100644
--- a/modules/Workflow/computeResponseSpectrum.py
+++ b/modules/Workflow/computeResponseSpectrum.py
@@ -1,6 +1,6 @@
"""Simple Python Script to integrate a strong motion record using
the Newmark-Beta method
-"""
+""" # noqa: INP001, D205, D400
import numpy as np
from scipy.constants import g
@@ -8,7 +8,7 @@
from scipy.interpolate import interp1d
-def convert_accel_units(acceleration, from_, to_='cm/s/s'):
+def convert_accel_units(acceleration, from_, to_='cm/s/s'): # noqa: C901
"""Converts acceleration from/to different units
:param acceleration: the acceleration (numeric or numpy array)
:param from_: unit of `acceleration`: string in "g", "m/s/s", "m/s**2",
@@ -17,7 +17,7 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'):
"m/s^2", "cm/s/s", "cm/s**2" or "cm/s^2". When missing, it defaults
to "cm/s/s"
:return: acceleration converted to the given units (by default, 'cm/s/s')
- """
+ """ # noqa: D205, D400, D401
m_sec_square = ('m/s/s', 'm/s**2', 'm/s^2')
cm_sec_square = ('cm/s/s', 'cm/s**2', 'cm/s^2')
acceleration = np.asarray(acceleration)
@@ -43,8 +43,8 @@ def convert_accel_units(acceleration, from_, to_='cm/s/s'):
if to_ in cm_sec_square:
return acceleration
- raise ValueError(
- 'Unrecognised time history units. '
+ raise ValueError( # noqa: TRY003
+ 'Unrecognised time history units. ' # noqa: EM101
"Should take either ''g'', ''m/s/s'' or ''cm/s/s''"
)
@@ -64,7 +64,7 @@ def get_velocity_displacement(
:returns:
velocity - Velocity Time series (cm/s)
displacement - Displacement Time series (cm)
- """
+ """ # noqa: D205, D400, D401
acceleration = convert_accel_units(acceleration, units)
if velocity is None:
velocity = time_step * cumtrapz(acceleration, initial=0.0)
@@ -74,7 +74,7 @@ def get_velocity_displacement(
class NewmarkBeta:
- """Evaluates the response spectrum using the Newmark-Beta methodology"""
+ """Evaluates the response spectrum using the Newmark-Beta methodology""" # noqa: D400
def __init__(
self,
@@ -96,7 +96,7 @@ def __init__(
Sampling rate of the acceleration
:param str units:
Units of the acceleration time history {"g", "m/s", "cm/s/s"}
- """
+ """ # noqa: D205, D400, D401
self.periods = periods
self.num_per = len(periods)
self.acceleration = convert_accel_units(acceleration, units)
@@ -133,7 +133,7 @@ def run(self):
accel - Acceleration response of Single Degree of Freedom Oscillator
vel - Velocity response of Single Degree of Freedom Oscillator
disp - Displacement response of Single Degree of Freedom Oscillator
- """
+ """ # noqa: D205, D400, D401
omega = (2.0 * np.pi) / self.periods
cval = self.damping * 2.0 * omega
kval = ((2.0 * np.pi) / self.periods) ** 2.0
@@ -162,7 +162,7 @@ def run(self):
}
return self.response_spectrum, time_series, accel, vel, disp
- def _newmark_beta(self, omega, cval, kval):
+ def _newmark_beta(self, omega, cval, kval): # noqa: ARG002
"""Newmark-beta integral
:param numpy.ndarray omega:
Angular period - (2 * pi) / T
@@ -175,7 +175,7 @@ def _newmark_beta(self, omega, cval, kval):
vel - Velocity response of a SDOF oscillator
disp - Displacement response of a SDOF oscillator
a_t - Acceleration response of a SDOF oscillator
- """
+ """ # noqa: D205, D400
# Parameters
dt = self.d_t
ground_acc = self.acceleration
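For context, createGM4BIM.py (next file) drives this class as NewmarkBeta(acc, dt, periods, damping=0.05, units='g'). A hedged usage sketch with a synthetic record; the values are illustrative only:

import numpy as np
from computeResponseSpectrum import NewmarkBeta

dt = 0.01
acc = np.sin(2.0 * np.pi * np.arange(0.0, 10.0, dt))  # synthetic accelerogram, in g
periods = np.array([0.1, 0.5, 1.0, 2.0])              # spectral periods, in s

calc = NewmarkBeta(acc, dt, periods, damping=0.05, units='g')
# run() returns the spectrum plus the SDOF response histories, per the
# docstring above.
spectrum, time_series, accel, vel, disp = calc.run()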
diff --git a/modules/Workflow/createGM4BIM.py b/modules/Workflow/createGM4BIM.py
index 948bb70a6..c01d67f69 100644
--- a/modules/Workflow/createGM4BIM.py
+++ b/modules/Workflow/createGM4BIM.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
#
# This file is part of the RDT Application.
@@ -48,16 +48,16 @@
import numpy as np
import pandas as pd
-from computeResponseSpectrum import *
+from computeResponseSpectrum import * # noqa: F403
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[0]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import *
+from simcenter_common import * # noqa: E402, F403
-def get_scale_factors(input_units, output_units):
- """Determine the scale factor to convert input event to internal event data"""
+def get_scale_factors(input_units, output_units): # noqa: C901
+ """Determine the scale factor to convert input event to internal event data""" # noqa: D400
# special case: if the input unit is not specified then do not do any scaling
if input_units is None:
scale_factors = {'ALL': 1.0}
@@ -69,13 +69,13 @@ def get_scale_factors(input_units, output_units):
unit_length = output_units.get('length', 'inch')
f_length = globals().get(unit_length, None)
if f_length is None:
- raise ValueError(f'Specified length unit not recognized: {unit_length}')
+ raise ValueError(f'Specified length unit not recognized: {unit_length}') # noqa: EM102, TRY003
# if no time unit is specified, 'sec' is assumed
unit_time = output_units.get('time', 'sec')
f_time = globals().get(unit_time, None)
if f_time is None:
- raise ValueError(f'Specified time unit not recognized: {unit_time}')
+ raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: EM102, TRY003
scale_factors = {}
@@ -88,8 +88,8 @@ def get_scale_factors(input_units, output_units):
# get the scale factor to standard units
f_in = globals().get(input_unit, None)
if f_in is None:
- raise ValueError(
- f'Input unit for event files not recognized: {input_unit}'
+ raise ValueError( # noqa: TRY003
+ f'Input unit for event files not recognized: {input_unit}' # noqa: EM102
)
unit_type = None
@@ -98,7 +98,7 @@ def get_scale_factors(input_units, output_units):
unit_type = base_unit_type
if unit_type is None:
- raise ValueError(f'Failed to identify unit type: {input_unit}')
+ raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: EM102, TRY003
# the output unit depends on the unit type
if unit_type == 'acceleration':
@@ -111,8 +111,8 @@ def get_scale_factors(input_units, output_units):
f_out = 1.0 / f_length
else:
- raise ValueError(
- f'Unexpected unit type in workflow: {unit_type}'
+ raise ValueError( # noqa: TRY003
+ f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102
)
# the scale factor is the product of input and output scaling
@@ -123,37 +123,37 @@ def get_scale_factors(input_units, output_units):
return scale_factors
-def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
- if not os.path.isdir(inputDir):
- print(f'input dir: {inputDir} does not exist')
+def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: C901, N802, N803, D103, PLR0915
+ if not os.path.isdir(inputDir): # noqa: PTH112
+ print(f'input dir: {inputDir} does not exist') # noqa: T201
return 0
- if not os.path.exists(outputDir):
- os.mkdir(outputDir)
+ if not os.path.exists(outputDir): # noqa: PTH110
+ os.mkdir(outputDir) # noqa: PTH102
#
# FMK bug fix - have to copy AIM files back to the inputDir dir as code below assumes they are there
#
extension = 'AIM.json'
- the_dir = os.path.abspath(inputDir)
+ the_dir = os.path.abspath(inputDir) # noqa: PTH100
for item in os.listdir(the_dir):
- item_path = os.path.join(the_dir, item)
- if os.path.isdir(item_path):
- template_dir = os.path.join(item_path, 'templatedir')
+ item_path = os.path.join(the_dir, item) # noqa: PTH118
+ if os.path.isdir(item_path): # noqa: PTH112
+ template_dir = os.path.join(item_path, 'templatedir') # noqa: PTH118
for the_file in os.listdir(template_dir):
if the_file.endswith(extension):
- bim_path = os.path.join(template_dir, the_file)
+ bim_path = os.path.join(template_dir, the_file) # noqa: PTH118
shutil.copy(bim_path, the_dir)
# siteFiles = glob(f"{inputDir}/*BIM.json")
# KZ: changing BIM to AIM
- siteFiles = glob(f'{inputDir}/*AIM.json')
+ siteFiles = glob(f'{inputDir}/*AIM.json') # noqa: PTH207, N806
- GP_file = []
- Longitude = []
- Latitude = []
- id = []
+ GP_file = [] # noqa: N806, F841
+ Longitude = [] # noqa: N806
+ Latitude = [] # noqa: N806
+ id = [] # noqa: A001
sites = []
# site im dictionary
periods = np.array(
@@ -202,7 +202,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
'1-PGD-0-1': [],
'1-PGD-0-2': [],
}
- for Ti in periods:
+ for Ti in periods: # noqa: N806
dict_im_all.update(
{
(f'SA({Ti}s)', 0, 1, 'median'): [],
@@ -237,7 +237,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
'1-PGD-0-1': [],
'1-PGD-0-2': [],
}
- for Ti in periods:
+ for Ti in periods: # noqa: N806
dict_im.update(
{
(f'SA({Ti}s)', 0, 1, 'median'): [],
@@ -248,16 +248,16 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
)
dict_im_site.update({f'1-SA({Ti}s)-0-1': [], f'1-SA({Ti}s)-0-2': []})
- with open(site) as f:
- All_json = json.load(f)
- generalInfo = All_json['GeneralInformation']
+ with open(site) as f: # noqa: PTH123
+ All_json = json.load(f) # noqa: N806
+ generalInfo = All_json['GeneralInformation'] # noqa: N806
Longitude.append(generalInfo['Longitude'])
Latitude.append(generalInfo['Latitude'])
# siteID = generalInfo['BIM_id']
# KZ: changing BIM to AIM
- siteID = generalInfo['AIM_id']
+ siteID = generalInfo['AIM_id'] # noqa: N806
# get unit info (needed for determining the simulated acc unit)
- unitInfo = All_json['units']
+ unitInfo = All_json['units'] # noqa: N806
# get scaling factor for surface acceleration
acc_unit = {'AccelerationEvent': 'g'}
f_scale_units = get_scale_factors(acc_unit, unitInfo)
@@ -277,12 +277,12 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
id.append(int(siteID))
- siteFileName = f'Site_{siteID}.csv'
+ siteFileName = f'Site_{siteID}.csv' # noqa: N806
sites.append(siteFileName)
- workdirs = glob(f'{inputDir}/{siteID}/workdir.*')
- siteEventFiles = []
- siteEventFactors = []
+ workdirs = glob(f'{inputDir}/{siteID}/workdir.*') # noqa: PTH207
+ siteEventFiles = [] # noqa: N806
+ siteEventFactors = [] # noqa: N806
# initialization
psa_x = []
@@ -295,10 +295,10 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
pgd_y = []
for workdir in workdirs:
- head, sep, sampleID = workdir.partition('workdir.')
+ head, sep, sampleID = workdir.partition('workdir.') # noqa: N806
# print(sampleID)
- eventName = f'Event_{siteID}_{sampleID}'
+ eventName = f'Event_{siteID}_{sampleID}' # noqa: N806
# print(eventName)
shutil.copy(f'{workdir}/fmkEVENT', f'{outputDir}/{eventName}.json')
@@ -306,17 +306,17 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
siteEventFactors.append(1.0)
# compute ground motion intensity measures
- with open(f'{outputDir}/{eventName}.json') as f:
+ with open(f'{outputDir}/{eventName}.json') as f: # noqa: PTH123, PLW2901
cur_gm = json.load(f)
cur_seismograms = cur_gm['Events'][0]['timeSeries']
- num_seismograms = len(cur_seismograms)
+ num_seismograms = len(cur_seismograms) # noqa: F841
# im_X and im_Y
for cur_time_series in cur_seismograms:
dt = cur_time_series.get('dT')
acc = [x / f_scale for x in cur_time_series.get('data')]
- acc_hist = np.array([[dt * x for x in range(len(acc))], acc])
+ acc_hist = np.array([[dt * x for x in range(len(acc))], acc]) # noqa: F841
# get intensity measure
- my_response_spectrum_calc = NewmarkBeta(
+ my_response_spectrum_calc = NewmarkBeta( # noqa: F405
acc, dt, periods, damping=0.05, units='g'
)
tmp, time_series, accel, vel, disp = (
@@ -345,7 +345,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
dict_im_site['1-PGV-0-2'] = pgv_y
dict_im_site['1-PGD-0-1'] = pgd_x
dict_im_site['1-PGD-0-2'] = pgd_y
- for jj, Ti in enumerate(periods):
+ for jj, Ti in enumerate(periods): # noqa: N806
cur_sa = f'1-SA({Ti}s)-0-1'
dict_im_site[cur_sa] = [tmp[jj] for tmp in psa_x]
cur_sa = f'1-SA({Ti}s)-0-2'
@@ -426,7 +426,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
dict_im[('PGD', 0, 1, 'beta')].append(s_pgd_x)
dict_im[('PGD', 0, 2, 'median')].append(m_pgd_y)
dict_im[('PGD', 0, 2, 'beta')].append(s_pgd_y)
- for jj, Ti in enumerate(periods):
+ for jj, Ti in enumerate(periods): # noqa: N806
cur_sa = f'SA({Ti}s)'
dict_im[(cur_sa, 0, 1, 'median')].append(m_psa_x[jj])
dict_im[(cur_sa, 0, 1, 'beta')].append(s_psa_x[jj])
@@ -445,20 +445,20 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
df_im.to_csv(f'{inputDir}/{siteID}/IM.csv', index=False)
# create site csv
- siteDF = pd.DataFrame(
+ siteDF = pd.DataFrame( # noqa: N806
list(zip(siteEventFiles, siteEventFactors)),
columns=['TH_file', 'factor'],
)
siteDF.to_csv(f'{outputDir}/{siteFileName}', index=False)
# create the EventFile
- gridDF = pd.DataFrame(
+ gridDF = pd.DataFrame( # noqa: N806
list(zip(sites, Longitude, Latitude)),
columns=['GP_file', 'Longitude', 'Latitude'],
)
# change the writing mode to append for paralleling workflow
- if os.path.exists(f'{outputDir}/EventGrid.csv'):
+ if os.path.exists(f'{outputDir}/EventGrid.csv'): # noqa: PTH110
# EventGrid.csv has been created
gridDF.to_csv(
f'{outputDir}/EventGrid.csv', mode='a', index=False, header=False
@@ -467,18 +467,18 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
# EventGrid.csv to be created
gridDF.to_csv(f'{outputDir}/EventGrid.csv', index=False)
# gridDF.to_csv(f"{outputDir}/EventGrid.csv", index=False)
- print(f'EventGrid.csv saved to {outputDir}')
+ print(f'EventGrid.csv saved to {outputDir}') # noqa: T201
# create pandas
- im_csv_path = os.path.dirname(os.path.dirname(outputDir))
+ im_csv_path = os.path.dirname(os.path.dirname(outputDir)) # noqa: PTH120
df_im_all = pd.DataFrame.from_dict(dict_im_all)
try:
- os.mkdir(os.path.join(im_csv_path, 'Results'))
- except:
- print('Results folder already exists')
+ os.mkdir(os.path.join(im_csv_path, 'Results')) # noqa: PTH102, PTH118
+ except: # noqa: E722
+ print('Results folder already exists') # noqa: T201
# KZ: 10/19/2022, minor patch for Buildings
df_im_all.to_csv(
- os.path.join(
+ os.path.join( # noqa: PTH118
im_csv_path,
'Results',
'Buildings',
@@ -487,7 +487,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
index=False,
)
df_im_all.to_csv(
- os.path.join(im_csv_path, f'IM_{min(id)}-{max(id)}.csv'),
+ os.path.join(im_csv_path, f'IM_{min(id)}-{max(id)}.csv'), # noqa: PTH118
index=False,
)
@@ -501,7 +501,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Create ground motions for BIM.', allow_abbrev=False
)
@@ -516,8 +516,8 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
workflowArgParser.add_argument('--removeInput', action='store_true')
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
- print(wfArgs)
+ print(wfArgs) # noqa: T201
# Calling the main function
createFilesForEventGrid(wfArgs.inputDir, wfArgs.outputDir, wfArgs.removeInput)
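As read from the hunks above, get_scale_factors multiplies an input-unit factor by an output-unit factor per entry, and skips scaling entirely when no input units are given. A sketch of that contract; the unit-name lookups (g, inch, sec, ...) rely on the star import from simcenter_common, so this only runs in that context:

# Hypothetical call mirroring the AccelerationEvent usage in this file.
acc_unit = {'AccelerationEvent': 'g'}
out_units = {'length': 'inch', 'time': 'sec'}
f_scale_units = get_scale_factors(acc_unit, out_units)
# -> {'AccelerationEvent': f_in * f_out}, i.e. g expressed in inch/sec**2

# Special case from the code above: no input units means no scaling.
assert get_scale_factors(None, out_units) == {'ALL': 1.0}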
diff --git a/modules/Workflow/femUQ-OLD.py b/modules/Workflow/femUQ-OLD.py
index ae3e934d6..2aea2a010 100755
--- a/modules/Workflow/femUQ-OLD.py
+++ b/modules/Workflow/femUQ-OLD.py
@@ -1,11 +1,11 @@
-# written: fmk, adamzs
+# written: fmk, adamzs # noqa: EXE002, INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -17,78 +17,78 @@
divider = '#' * 80
log_output = []
-from WorkflowUtils import *
+from WorkflowUtils import * # noqa: E402, F403
-def main(run_type, inputFile, applicationsRegistry):
+def main(run_type, inputFile, applicationsRegistry): # noqa: C901, N803, D103, PLR0912, PLR0915
# the whole workflow is wrapped within a 'try' block.
# a number of exceptions (files missing, explicit application failures, etc.) are
# handled explicitly to aid the user.
# But unhandled exceptions cause the workflow to stop with an error, handled in the
# exception block way at the bottom of this main() function
try:
- workflow_log(divider)
- workflow_log('Start of run')
- workflow_log(divider)
- workflow_log('workflow input file: %s' % inputFile)
- workflow_log('application registry file: %s' % applicationsRegistry)
- workflow_log('runtype: %s' % run_type)
- workflow_log(divider)
+ workflow_log(divider) # noqa: F405
+ workflow_log('Start of run') # noqa: F405
+ workflow_log(divider) # noqa: F405
+ workflow_log('workflow input file: %s' % inputFile) # noqa: F405, UP031
+ workflow_log('application registry file: %s' % applicationsRegistry) # noqa: F405, UP031
+ workflow_log('runtype: %s' % run_type) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
#
# first we parse the applications registry to load all possible applications
# - for each application type we place in a dictionary key being name, value containing path to executable
#
- with open(applicationsRegistry) as data_file:
- registryData = json.load(data_file)
+ with open(applicationsRegistry) as data_file: # noqa: PTH123
+ registryData = json.load(data_file) # noqa: N806
# convert all relative paths to full paths
- A = 'Applications'
- Applications = dict()
- appList = 'Event Modeling EDP Simulation UQ'.split(' ')
- appList = [a + A for a in appList]
+ A = 'Applications' # noqa: N806
+ Applications = dict() # noqa: C408, N806
+ appList = 'Event Modeling EDP Simulation UQ'.split(' ') # noqa: N806
+ appList = [a + A for a in appList] # noqa: N806
for app_type in appList:
if app_type in registryData:
- xApplicationData = registryData[app_type]
- applicationsData = xApplicationData['Applications']
+ xApplicationData = registryData[app_type] # noqa: N806
+ applicationsData = xApplicationData['Applications'] # noqa: N806
for app in applicationsData:
- appName = app['Name']
- appExe = app['ExecutablePath']
+ appName = app['Name'] # noqa: N806
+ appExe = app['ExecutablePath'] # noqa: N806
if app_type not in Applications:
- Applications[app_type] = dict()
+ Applications[app_type] = dict() # noqa: C408
Applications[app_type][appName] = appExe
#
# open input file, and parse json into data
#
- with open(inputFile) as data_file:
+ with open(inputFile) as data_file: # noqa: PTH123
data = json.load(data_file)
# convert all relative paths to full paths
# relative2fullpath(data)
if 'runDir' in data:
- runDIR = data['runDir']
+ runDIR = data['runDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a runDir Entry')
+ raise WorkFlowInputError('Need a runDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'remoteAppDir' in data:
- remoteAppDir = data['remoteAppDir']
+ remoteAppDir = data['remoteAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a remoteAppDir Entry')
+ raise WorkFlowInputError('Need a remoteAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
if 'localAppDir' in data:
- localAppDir = data['localAppDir']
+ localAppDir = data['localAppDir'] # noqa: N806
else:
- raise WorkFlowInputError('Need a localAppDir Entry')
+ raise WorkFlowInputError('Need a localAppDir Entry') # noqa: EM101, F405, TRY003, TRY301
#
# before running chdir to templatedir
#
- workflow_log('run Directory: %s' % runDIR)
+ workflow_log('run Directory: %s' % runDIR) # noqa: F405, UP031
os.chdir(runDIR)
os.chdir('templatedir')
@@ -100,7 +100,7 @@ def main(run_type, inputFile, applicationsRegistry):
if 'Applications' in data:
available_apps = data['Applications']
else:
- raise WorkFlowInputError('Need an Applications Entry')
+ raise WorkFlowInputError('Need an Applications Entry') # noqa: EM101, F405, TRY003, TRY301
#
# get events, for each the application and its data .. FOR NOW 1 EVENT
@@ -111,178 +111,178 @@ def main(run_type, inputFile, applicationsRegistry):
for event in events:
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if (
- eventClassification == 'Earthquake'
+ eventClassification == 'Earthquake' # noqa: PLR1714
or eventClassification == 'Wind'
):
if 'Application' in event:
- eventApplication = event['Application']
- eventAppData = event['ApplicationData']
- eventData = event['ApplicationData']
+ eventApplication = event['Application'] # noqa: N806
+ eventAppData = event['ApplicationData'] # noqa: N806
+ eventData = event['ApplicationData'] # noqa: N806, F841
if (
- eventApplication
+ eventApplication # noqa: SIM118
in Applications['EventApplications'].keys()
):
- eventAppExe = Applications['EventApplications'].get(
+ eventAppExe = Applications['EventApplications'].get( # noqa: N806
eventApplication
)
- workflow_log(remoteAppDir)
- workflow_log(eventAppExe)
- eventAppExeLocal = posixpath.join(
+ workflow_log(remoteAppDir) # noqa: F405
+ workflow_log(eventAppExe) # noqa: F405
+ eventAppExeLocal = posixpath.join( # noqa: N806
localAppDir, eventAppExe
)
- eventAppExeRemote = posixpath.join(
+ eventAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, eventAppExe
)
- workflow_log(eventAppExeRemote)
+ workflow_log(eventAppExeRemote) # noqa: F405
else:
- raise WorkFlowInputError(
- 'Event application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event application %s not in registry' # noqa: UP031
% eventApplication
)
else:
- raise WorkFlowInputError(
- 'Need an EventApplication section'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need an EventApplication section' # noqa: EM101
)
else:
- raise WorkFlowInputError(
- 'Event classification must be Earthquake, not %s'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Event classification must be Earthquake or Wind, not %s' # noqa: UP031
% eventClassification
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an Events Entry in Applications')
+ raise WorkFlowInputError('Need an Events Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get modeling application and its data
#
if 'Modeling' in available_apps:
- modelingApp = available_apps['Modeling']
+ modelingApp = available_apps['Modeling'] # noqa: N806
if 'Application' in modelingApp:
- modelingApplication = modelingApp['Application']
+ modelingApplication = modelingApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- modelingAppData = modelingApp['ApplicationData']
+ modelingAppData = modelingApp['ApplicationData'] # noqa: N806
if (
- modelingApplication
+ modelingApplication # noqa: SIM118
in Applications['ModelingApplications'].keys()
):
- modelingAppExe = Applications['ModelingApplications'].get(
+ modelingAppExe = Applications['ModelingApplications'].get( # noqa: N806
modelingApplication
)
- modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe)
- modelingAppExeRemote = posixpath.join(
+ modelingAppExeLocal = posixpath.join(localAppDir, modelingAppExe) # noqa: N806
+ modelingAppExeRemote = posixpath.join( # noqa: N806
remoteAppDir, modelingAppExe
)
else:
- raise WorkFlowInputError(
- 'Modeling application %s not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY301
+ 'Modeling application %s not in registry' # noqa: UP031
% modelingApplication
)
else:
- raise WorkFlowInputError(
- 'Need a ModelingApplication in Modeling data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a ModelingApplication in Modeling data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Modeling Entry in Applications')
+ raise WorkFlowInputError('Need a Modeling Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get edp application and its data .. CURRENTLY MODELING APP MUST CREATE EDP
#
if 'EDP' in available_apps:
- edpApp = available_apps['EDP']
+ edpApp = available_apps['EDP'] # noqa: N806
if 'Application' in edpApp:
- edpApplication = edpApp['Application']
+ edpApplication = edpApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- edpAppData = edpApp['ApplicationData']
- if edpApplication in Applications['EDPApplications'].keys():
- edpAppExe = Applications['EDPApplications'].get(edpApplication)
- edpAppExeLocal = posixpath.join(localAppDir, edpAppExe)
- edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe)
+ edpAppData = edpApp['ApplicationData'] # noqa: N806
+ if edpApplication in Applications['EDPApplications'].keys(): # noqa: SIM118
+ edpAppExe = Applications['EDPApplications'].get(edpApplication) # noqa: N806
+ edpAppExeLocal = posixpath.join(localAppDir, edpAppExe) # noqa: N806
+ edpAppExeRemote = posixpath.join(remoteAppDir, edpAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'EDP application {edpApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'EDP application {edpApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need an EDPApplication in EDP data')
+ raise WorkFlowInputError('Need an EDPApplication in EDP data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need an EDP Entry in Applications')
+ raise WorkFlowInputError('Need an EDP Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
#
# get simulation application and its data
#
if 'Simulation' in available_apps:
- simulationApp = available_apps['Simulation']
+ simulationApp = available_apps['Simulation'] # noqa: N806
if 'Application' in simulationApp:
- simulationApplication = simulationApp['Application']
+ simulationApplication = simulationApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- simAppData = simulationApp['ApplicationData']
+ simAppData = simulationApp['ApplicationData'] # noqa: N806
if (
- simulationApplication
+ simulationApplication # noqa: SIM118
in Applications['SimulationApplications'].keys()
):
- simAppExe = Applications['SimulationApplications'].get(
+ simAppExe = Applications['SimulationApplications'].get( # noqa: N806
simulationApplication
)
- simAppExeLocal = posixpath.join(localAppDir, simAppExe)
- simAppExeRemote = posixpath.join(remoteAppDir, simAppExe)
+ simAppExeLocal = posixpath.join(localAppDir, simAppExe) # noqa: N806
+ simAppExeRemote = posixpath.join(remoteAppDir, simAppExe) # noqa: N806
else:
- raise WorkFlowInputError(
- f'Simulation application {simulationApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'Simulation application {simulationApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError(
- 'Need an SimulationApplication in Simulation data'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ 'Need a SimulationApplication in Simulation data' # noqa: EM101
)
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a Simulation Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
if 'UQ' in available_apps:
- uqApp = available_apps['UQ']
+ uqApp = available_apps['UQ'] # noqa: N806
if 'Application' in uqApp:
- uqApplication = uqApp['Application']
+ uqApplication = uqApp['Application'] # noqa: N806
# check modeling app in registry, if so get full executable path
- uqAppData = uqApp['ApplicationData']
- if uqApplication in Applications['UQApplications'].keys():
- uqAppExe = Applications['UQApplications'].get(uqApplication)
- uqAppExeLocal = posixpath.join(localAppDir, uqAppExe)
- uqAppExeRemote = posixpath.join(localAppDir, uqAppExe)
+ uqAppData = uqApp['ApplicationData'] # noqa: N806
+ if uqApplication in Applications['UQApplications'].keys(): # noqa: SIM118
+ uqAppExe = Applications['UQApplications'].get(uqApplication) # noqa: N806
+ uqAppExeLocal = posixpath.join(localAppDir, uqAppExe) # noqa: N806
+ uqAppExeRemote = posixpath.join(localAppDir, uqAppExe) # noqa: N806, F841
else:
- raise WorkFlowInputError(
- f'UQ application {uqApplication} not in registry'
+ raise WorkFlowInputError( # noqa: F405, TRY003, TRY301
+ f'UQ application {uqApplication} not in registry' # noqa: EM102
)
else:
- raise WorkFlowInputError('Need a UQApplication in UQ data')
+ raise WorkFlowInputError('Need a UQApplication in UQ data') # noqa: EM101, F405, TRY003, TRY301
else:
- raise WorkFlowInputError('Need a Simulation Entry in Applications')
+ raise WorkFlowInputError('Need a UQ Entry in Applications') # noqa: EM101, F405, TRY003, TRY301
- workflow_log('SUCCESS: Parsed workflow input')
- workflow_log(divider)
+ workflow_log('SUCCESS: Parsed workflow input') # noqa: F405
+ workflow_log(divider) # noqa: F405
#
# now invoke the applications
@@ -295,18 +295,18 @@ def main(run_type, inputFile, applicationsRegistry):
# - perform Simulation
# - getDL
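# names of the JSON files exchanged between the workflow applications;
# 'driver' collects the per-realization commands for the UQ engine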
- bimFILE = 'dakota.json'
- eventFILE = 'EVENT.json'
- samFILE = 'SAM.json'
- edpFILE = 'EDP.json'
- simFILE = 'SIM.json'
- driverFile = 'driver'
+ bimFILE = 'dakota.json' # noqa: N806
+ eventFILE = 'EVENT.json' # noqa: N806
+ samFILE = 'SAM.json' # noqa: N806
+ edpFILE = 'EDP.json' # noqa: N806
+ simFILE = 'SIM.json' # noqa: N806
+ driverFile = 'driver' # noqa: N806
# open driver file & write building app (minus the --getRV) to it
- driverFILE = open(driverFile, 'w')
+ driverFILE = open(driverFile, 'w') # noqa: SIM115, PTH123, N806
# get RV for event
- eventAppDataList = [
+ eventAppDataList = [ # noqa: N806
f'"{eventAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -316,13 +316,13 @@ def main(run_type, inputFile, applicationsRegistry):
if eventAppExe.endswith('.py'):
eventAppDataList.insert(0, 'python')
- for key in eventAppData.keys():
+ for key in eventAppData.keys(): # noqa: SIM118
eventAppDataList.append('--' + key)
value = eventAppData.get(key)
eventAppDataList.append('' + value)
for item in eventAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
eventAppDataList.append('--getRV')
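# with --getRV appended, the app is now run once so it can declare its
# random variables before the driver script is ever executed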
@@ -331,11 +331,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
eventAppDataList[0] = '' + eventAppExeLocal
- command, result, returncode = runApplication(eventAppDataList)
+ command, result, returncode = runApplication(eventAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for building model
- modelAppDataList = [
+ modelAppDataList = [ # noqa: N806
f'"{modelingAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -348,12 +348,12 @@ def main(run_type, inputFile, applicationsRegistry):
if modelingAppExe.endswith('.py'):
modelAppDataList.insert(0, 'python')
- for key in modelingAppData.keys():
+ for key in modelingAppData.keys(): # noqa: SIM118
modelAppDataList.append('--' + key)
modelAppDataList.append('' + modelingAppData.get(key))
for item in modelAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
modelAppDataList.append('--getRV')
@@ -363,11 +363,11 @@ def main(run_type, inputFile, applicationsRegistry):
else:
modelAppDataList[0] = modelingAppExeLocal
- command, result, returncode = runApplication(modelAppDataList)
+ command, result, returncode = runApplication(modelAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for EDP!
- edpAppDataList = [
+ edpAppDataList = [ # noqa: N806
f'"{edpAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -382,12 +382,12 @@ def main(run_type, inputFile, applicationsRegistry):
if edpAppExe.endswith('.py'):
edpAppDataList.insert(0, 'python')
- for key in edpAppData.keys():
+ for key in edpAppData.keys(): # noqa: SIM118
edpAppDataList.append('--' + key)
edpAppDataList.append('' + edpAppData.get(key))
for item in edpAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
if edpAppExe.endswith('.py'):
@@ -396,11 +396,11 @@ def main(run_type, inputFile, applicationsRegistry):
edpAppDataList[0] = edpAppExeLocal
edpAppDataList.append('--getRV')
- command, result, returncode = runApplication(edpAppDataList)
+ command, result, returncode = runApplication(edpAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# get RV for Simulation
- simAppDataList = [
+ simAppDataList = [ # noqa: N806
f'"{simAppExeRemote}"',
'--filenameBIM',
bimFILE,
@@ -417,12 +417,12 @@ def main(run_type, inputFile, applicationsRegistry):
if simAppExe.endswith('.py'):
simAppDataList.insert(0, 'python')
- for key in simAppData.keys():
+ for key in simAppData.keys(): # noqa: SIM118
simAppDataList.append('--' + key)
simAppDataList.append('' + simAppData.get(key))
for item in simAppDataList:
- driverFILE.write('%s ' % item)
+ driverFILE.write('%s ' % item) # noqa: UP031
driverFILE.write('\n')
simAppDataList.append('--getRV')
@@ -431,13 +431,13 @@ def main(run_type, inputFile, applicationsRegistry):
else:
simAppDataList[0] = simAppExeLocal
- command, result, returncode = runApplication(simAppDataList)
+ command, result, returncode = runApplication(simAppDataList) # noqa: F405
log_output.append([command, result, returncode])
# perform the simulation
driverFILE.close()
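# the driver script is complete; everything below assembles the UQ
# command, which runs the driver repeatedly to perform the sampling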
- uqAppDataList = [
+ uqAppDataList = [ # noqa: N806
f'"{uqAppExeLocal}"',
'--filenameBIM',
bimFILE,
@@ -460,7 +460,7 @@ def main(run_type, inputFile, applicationsRegistry):
uqAppDataList.append('--runType')
uqAppDataList.append(run_type)
- for key in uqAppData.keys():
+ for key in uqAppData.keys(): # noqa: SIM118
uqAppDataList.append('--' + key)
value = uqAppData.get(key)
if isinstance(value, string_types):
@@ -468,50 +468,50 @@ def main(run_type, inputFile, applicationsRegistry):
else:
uqAppDataList.append('' + str(value))
- if run_type == 'run' or run_type == 'set_up':
- workflow_log('Running Simulation...')
- workflow_log(' '.join(uqAppDataList))
- command, result, returncode = runApplication(uqAppDataList)
+ if run_type == 'run' or run_type == 'set_up': # noqa: PLR1714
+ workflow_log('Running Simulation...') # noqa: F405
+ workflow_log(' '.join(uqAppDataList)) # noqa: F405
+ command, result, returncode = runApplication(uqAppDataList) # noqa: F405
log_output.append([command, result, returncode])
- workflow_log('Simulation ended...')
+ workflow_log('Simulation ended...') # noqa: F405
else:
- workflow_log('Setup run only. No simulation performed.')
+ workflow_log('Setup run only. No simulation performed.') # noqa: F405
- except WorkFlowInputError as e:
- print('workflow error: %s' % e.value)
- workflow_log('workflow error: %s' % e.value)
- workflow_log(divider)
- exit(1)
+ except WorkFlowInputError as e: # noqa: F405
+ print('workflow error: %s' % e.value) # noqa: T201, UP031
+ workflow_log('workflow error: %s' % e.value) # noqa: F405, UP031
+ workflow_log(divider) # noqa: F405
+ exit(1) # noqa: PLR1722
# any other exception is logged here and then re-raised
except Exception as e:
- print('workflow error: %s' % e.value)
- workflow_log('unhandled exception... exiting')
+ print('workflow error: %s' % e) # noqa: T201, UP031
+ workflow_log('unhandled exception... exiting') # noqa: F405
raise
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
run_type = sys.argv[1]
- inputFile = sys.argv[2]
- applicationsRegistry = sys.argv[3]
+ inputFile = sys.argv[2] # noqa: N816
+ applicationsRegistry = sys.argv[3] # noqa: N816
main(run_type, inputFile, applicationsRegistry)
- workflow_log_file = 'workflow-log-%s.txt' % (
+ workflow_log_file = 'workflow-log-%s.txt' % ( # noqa: UP031
strftime('%Y-%m-%d-%H-%M-%S-utc', gmtime())
)
- log_filehandle = open(workflow_log_file, 'w')
+ log_filehandle = open(workflow_log_file, 'w') # noqa: SIM115, PTH123
- print(type(log_filehandle))
+ print(type(log_filehandle)) # noqa: T201
print(divider, file=log_filehandle)
print('Start of Log', file=log_filehandle)
print(divider, file=log_filehandle)
@@ -519,13 +519,13 @@ def main(run_type, inputFile, applicationsRegistry):
# nb: log_output is a global variable, defined at the top of this script.
for result in log_output:
print(divider, file=log_filehandle)
- print('command line:\n%s\n' % result[0], file=log_filehandle)
+ print('command line:\n%s\n' % result[0], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
- print('output from process:\n%s\n' % result[1], file=log_filehandle)
+ print('output from process:\n%s\n' % result[1], file=log_filehandle) # noqa: UP031
print(divider, file=log_filehandle)
print('End of Log', file=log_filehandle)
print(divider, file=log_filehandle)
- workflow_log('Log file: %s' % workflow_log_file)
- workflow_log('End of run.')
+ workflow_log('Log file: %s' % workflow_log_file) # noqa: F405, UP031
+ workflow_log('End of run.') # noqa: F405
diff --git a/modules/Workflow/femUQ.py b/modules/Workflow/femUQ.py
index 2bf22411f..439d3680f 100755
--- a/modules/Workflow/femUQ.py
+++ b/modules/Workflow/femUQ.py
@@ -1,4 +1,4 @@
-#
+# # noqa: EXE002, INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -46,25 +46,25 @@
import sys
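# Python 2/3 compatibility: alias the py2 builtins so the rest of the
# script can use range/string_types uniformly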
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(run_type, input_file, app_registry):
+def main(run_type, input_file, app_registry): # noqa: D103
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
whale.log_file = runDir + '/log.txt'
- with open(whale.log_file, 'w') as f:
+ with open(whale.log_file, 'w') as f: # noqa: PTH123
f.write('femUQ workflow\n')
# echo the inputs
@@ -72,7 +72,7 @@ def main(run_type, input_file, app_registry):
log_msg('Started running the workflow script')
log_msg(log_div)
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -93,13 +93,13 @@ def main(run_type, input_file, app_registry):
if __name__ == '__main__':
- if len(sys.argv) != 4:
- print('\nNeed three arguments, e.g.:\n')
- print(
- ' python %s action workflowinputfile.json workflowapplications.json'
+ if len(sys.argv) != 4: # noqa: PLR2004
+ print('\nNeed three arguments, e.g.:\n') # noqa: T201
+ print( # noqa: T201
+ ' python %s action workflowinputfile.json workflowapplications.json' # noqa: UP031
% sys.argv[0]
)
- print('\nwhere: action is either check or run\n')
- exit(1)
+ print('\nwhere: action is either check or run\n') # noqa: T201
+ exit(1) # noqa: PLR1722
main(run_type=sys.argv[1], input_file=sys.argv[2], app_registry=sys.argv[3])
diff --git a/modules/Workflow/qWHALE.py b/modules/Workflow/qWHALE.py
index d44fb9eb4..831cb3582 100755
--- a/modules/Workflow/qWHALE.py
+++ b/modules/Workflow/qWHALE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: EXE002, INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -46,22 +46,22 @@
import sys
from pathlib import Path
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
+def main(run_type, input_file, app_registry, working_dir, app_dir, log_file): # noqa: ARG001, D103
# initialize the log file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
if working_dir is not None:
- runDir = working_dir
+ runDir = working_dir # noqa: N806
else:
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
whale.log_file = runDir + '/log.txt'
# initialize log file
@@ -80,7 +80,7 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
log_msg('Started running the workflow script')
log_div()
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -112,7 +112,7 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter sWHALE workflow for a single asset.',
allow_abbrev=False,
)
@@ -125,8 +125,8 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
)
workflowArgParser.add_argument(
'registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -151,11 +151,11 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
# Calling the main workflow method and passing the parsed arguments
diff --git a/modules/Workflow/rWHALE.py b/modules/Workflow/rWHALE.py
index ee8aefe6f..2e3686152 100644
--- a/modules/Workflow/rWHALE.py
+++ b/modules/Workflow/rWHALE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -50,7 +50,7 @@
import sys
from pathlib import Path
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import importlib
@@ -59,7 +59,7 @@
from whale.main import log_div, log_msg
-def main(
+def main( # noqa: C901, D103
run_type,
input_file,
app_registry,
@@ -69,10 +69,10 @@ def main(
working_dir,
app_dir,
log_file,
- site_response,
- parallelType,
- mpiExec,
- numPROC,
+ site_response, # noqa: ARG001
+ parallelType, # noqa: N803
+ mpiExec, # noqa: N803
+ numPROC, # noqa: N803
):
#
# check if running in a parallel mpi job
@@ -83,9 +83,9 @@ def main(
# - else set numP = 1, procID = 0 and doParallel = False
#
- numP = 1
- procID = 0
- doParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ doParallel = False # noqa: N806
mpi_spec = importlib.util.find_spec('mpi4py')
found = mpi_spec is not None
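# mpi4py is imported only if it is installed, so serial runs do not
# require an MPI stack to be present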
@@ -93,26 +93,26 @@ def main(
from mpi4py import MPI
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
else:
- doParallel = True
+ doParallel = True # noqa: N806
# save the reference dir in the input file
- with open(input_file, encoding='utf-8') as f:
- inputs = json.load(f)
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
+ inputs = json.load(f) # noqa: F841
- # TODO: if the ref dir is needed, do NOT save it to the input file, store it
+ # TODO: if the ref dir is needed, do NOT save it to the input file, store it # noqa: TD002
# somewhere else in a file that is not shared among processes
# inputs['refDir'] = reference_dir
# with open(input_file, 'w') as f:
# json.dump(inputs, f, indent=2)
- # TODO: remove the commented section below, I only kept it for now to make
+ # TODO: remove the commented section below, I only kept it for now to make # noqa: TD002
# sure it is not needed
# if working_dir is not None:
@@ -120,11 +120,11 @@ def main(
# else:
# runDir = inputs['runDir']
- if not os.path.exists(working_dir):
- os.mkdir(working_dir)
+ if not os.path.exists(working_dir): # noqa: PTH110
+ os.mkdir(working_dir) # noqa: PTH102
# initialize log file
- if parallelType == 'parSETUP' or parallelType == 'seqRUN':
+ if parallelType == 'parSETUP' or parallelType == 'seqRUN': # noqa: PLR1714
if log_file == 'log.txt':
log_file_path = working_dir + '/log.txt'
else:
@@ -152,7 +152,7 @@ def main(
if force_cleanup:
log_msg('Forced cleanup turned on.')
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -185,23 +185,23 @@ def main(
WF.workflow_apps['Building'].pref['filter'] = bldg_id_filter
# initialize the working directory
- if parallelType == 'seqRUN' or parallelType == 'parSETUP':
+ if parallelType == 'seqRUN' or parallelType == 'parSETUP': # noqa: PLR1714
WF.init_workdir()
# prepare the basic inputs for individual assets
- if parallelType == 'seqRUN' or parallelType == 'parSETUP':
+ if parallelType == 'seqRUN' or parallelType == 'parSETUP': # noqa: PLR1714
asset_files = WF.create_asset_files()
if parallelType != 'parSETUP':
asset_files = WF.augment_asset_files()
# run the regional event & do mapping
- if parallelType == 'seqRUN' or parallelType == 'parSETUP':
+ if parallelType == 'seqRUN' or parallelType == 'parSETUP': # noqa: PLR1714
# run event
WF.perform_regional_event()
# now for each asset, do regional mapping
- for asset_type, assetIt in asset_files.items():
+ for asset_type, assetIt in asset_files.items(): # noqa: N806
WF.perform_regional_mapping(assetIt, asset_type)
if parallelType == 'parSETUP':
@@ -209,31 +209,31 @@ def main(
# now for each asset run dl workflow .. in parallel if requested
count = 0
- for asset_type, assetIt in asset_files.items():
+ for asset_type, assetIt in asset_files.items(): # noqa: N806
# perform the regional mapping
# WF.perform_regional_mapping(assetIt, asset_type)
- # TODO: not elegant code, fix later
- with open(assetIt, encoding='utf-8') as f:
+ # TODO: not elegant code, fix later # noqa: TD002
+ with open(assetIt, encoding='utf-8') as f: # noqa: PTH123
asst_data = json.load(f)
# Sometimes multiple asset types need to be analyzed together, e.g., pipelines and nodes in a water network
run_asset_type = asset_type
- if asset_type == 'Buildings' or asset_type == 'TransportationNetwork':
+ if asset_type == 'Buildings' or asset_type == 'TransportationNetwork': # noqa: PLR1714
pass
elif asset_type == 'WaterNetworkNodes':
continue # Run the nodes with the pipelines, i.e., the water distribution network
elif asset_type == 'WaterNetworkPipelines':
run_asset_type = 'WaterDistributionNetwork' # Run the pipelines with the entire water distribution network
else:
- print('No support for asset type: ', asset_type)
+ print('No support for asset type: ', asset_type) # noqa: T201
# The preprocess app sequence (previously get_RV)
preprocess_app_sequence = ['Event', 'Modeling', 'EDP', 'Simulation']
# The workflow app sequence
- WF_app_sequence = ['Event', 'Modeling', 'EDP', 'Simulation']
+ WF_app_sequence = ['Event', 'Modeling', 'EDP', 'Simulation'] # noqa: N806
# For each asset
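# assets are dealt out round-robin below: rank procID takes every
# numP-th asset, which keeps the MPI ranks evenly loaded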
for asst in asst_data:
if count % numP == procID:
@@ -258,12 +258,12 @@ def main(
count = count + 1
# wait for every process to finish
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
# aggregate results
if (
- asset_type == 'Buildings'
+ asset_type == 'Buildings' # noqa: PLR1714
or asset_type == 'TransportationNetwork'
or asset_type == 'WaterDistributionNetwork'
):
@@ -271,7 +271,7 @@ def main(
elif asset_type == 'WaterNetworkPipelines':
# Provide the headers and out types
- headers = dict(DV=[0])
+ headers = dict(DV=[0]) # noqa: C408
out_types = ['DV']
@@ -283,7 +283,7 @@ def main(
headers=headers,
)
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
WF.combine_assets_results(asset_files)
@@ -292,7 +292,7 @@ def main(
# add system performance
#
system_performance_performed = False
- for asset_type in asset_files.keys():
+ for asset_type in asset_files.keys(): # noqa: SIM118
performed = WF.perform_system_performance_assessment(asset_type)
if performed:
system_performance_performed = True
@@ -311,7 +311,7 @@ def main(
if procID == 0:
WF.cleanup_workdir()
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
log_msg('Workflow completed.')
@@ -322,7 +322,7 @@ def main(
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter rWHALE workflow for a set of assets.',
allow_abbrev=False,
)
@@ -343,8 +343,8 @@ def main(
workflowArgParser.add_argument(
'-r',
'--registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -358,13 +358,13 @@ def main(
workflowArgParser.add_argument(
'-d',
'--referenceDir',
- default=os.path.join(os.getcwd(), 'input_data'),
+ default=os.path.join(os.getcwd(), 'input_data'), # noqa: PTH109, PTH118
help='Relative paths in the config file are referenced to this directory.',
)
workflowArgParser.add_argument(
'-w',
'--workDir',
- default=os.path.join(os.getcwd(), 'Results'),
+ default=os.path.join(os.getcwd(), 'Results'), # noqa: PTH109, PTH118
help='Absolute path to the working directory.',
)
workflowArgParser.add_argument(
@@ -406,11 +406,11 @@ def main(
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
if wfArgs.check:
@@ -419,7 +419,7 @@ def main(
run_type = 'runningLocal'
# Calling the main workflow method and passing the parsed arguments
- numPROC = int(wfArgs.numP)
+ numPROC = int(wfArgs.numP) # noqa: N816
main(
run_type=run_type,
diff --git a/modules/Workflow/sWHALE.py b/modules/Workflow/sWHALE.py
index 6962410b3..5f098a3f3 100755
--- a/modules/Workflow/sWHALE.py
+++ b/modules/Workflow/sWHALE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: EXE002, INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -47,22 +47,22 @@
import sys
from pathlib import Path
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from whale.main import log_div, log_msg
-def runSWhale(
+def runSWhale( # noqa: N802, D103
inputs,
- WF,
- assetID=None,
- assetAIM='AIM.json',
- prep_app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'],
- WF_app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'],
+ WF, # noqa: N803
+ assetID=None, # noqa: N803
+ assetAIM='AIM.json', # noqa: N803
+ prep_app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'], # noqa: B006
+ WF_app_sequence=['Event', 'Modeling', 'EDP', 'Simulation'], # noqa: B006, N803
asset_type=None,
- copy_resources=False,
- force_cleanup=False,
+ copy_resources=False, # noqa: FBT002
+ force_cleanup=False, # noqa: FBT002
):
# update the runDir, if needed
# with open(input_file, 'r', encoding="utf-8") as f:
@@ -137,7 +137,7 @@ def runSWhale(
# When used in rWhale, delete the original AIM since it is the same as asset_id/templatedir/AIM
if assetAIM != 'AIM.json':
- os.remove(assetAIM)
+ os.remove(assetAIM) # noqa: PTH107
if force_cleanup:
# clean up intermediate files from the simulation
WF.cleanup_simdir(assetID)
@@ -147,16 +147,16 @@ def runSWhale(
log_div(prepend_blank_space=False)
-def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
+def main(run_type, input_file, app_registry, working_dir, app_dir, log_file): # noqa: ARG001, D103
# update the runDir, if needed
- with open(input_file, encoding='utf-8') as f:
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
inputs = json.load(f)
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
if working_dir is not None:
- runDir = working_dir
+ runDir = working_dir # noqa: N806
else:
- runDir = inputs['runDir']
+ runDir = inputs['runDir'] # noqa: N806
whale.log_file = runDir + '/log.txt'
@@ -181,10 +181,10 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
try:
if inputs['DL']['Demands']['DemandFilePath'] is not None:
run_type = 'loss_only'
- except:
+ except: # noqa: S110, E722
pass
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
input_file,
app_registry,
@@ -222,7 +222,7 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter sWHALE workflow for a single asset.',
allow_abbrev=False,
)
@@ -235,8 +235,8 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
)
workflowArgParser.add_argument(
'registry',
- default=os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ default=os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'WorkflowApplications.json',
),
help='Path to file containing registered workflow applications',
@@ -261,11 +261,11 @@ def main(run_type, input_file, app_registry, working_dir, app_dir, log_file):
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
# Calling the main workflow method and passing the parsed arguments
diff --git a/modules/Workflow/siteResponseWHALE.py b/modules/Workflow/siteResponseWHALE.py
index 874bf38f6..0c70356ed 100644
--- a/modules/Workflow/siteResponseWHALE.py
+++ b/modules/Workflow/siteResponseWHALE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -49,14 +49,14 @@
from createGM4BIM import createFilesForEventGrid
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import whale.main as whale
from sWHALE import runSWhale
from whale.main import log_div, log_msg
-def main(
+def main( # noqa: C901, D103
run_type,
input_file,
app_registry,
@@ -67,13 +67,13 @@ def main(
app_dir,
log_file,
output_dir,
- parallelType,
- mpiExec,
- numPROC,
+ parallelType, # noqa: N803
+ mpiExec, # noqa: N803
+ numPROC, # noqa: N803
):
- numP = 1
- procID = 0
- doParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ doParallel = False # noqa: N806
mpi_spec = importlib.util.find_spec('mpi4py')
found = mpi_spec is not None
@@ -81,18 +81,18 @@ def main(
from mpi4py import MPI
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- parallelType = 'parRUN'
- if numP < 2:
- doParallel = False
- numP = 1
- parallelType = 'seqRUN'
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ parallelType = 'parRUN' # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ parallelType = 'seqRUN' # noqa: N806
+ procID = 0 # noqa: N806
else:
- doParallel = True
+ doParallel = True # noqa: N806
- print(
+ print( # noqa: T201
'siteResponse (doParallel, procID, numP):',
doParallel,
procID,
@@ -102,16 +102,16 @@ def main(
)
# save the reference dir in the input file
- with open(input_file, encoding='utf-8') as f:
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
inputs = json.load(f)
- print('WORKING_DIR', working_dir)
+ print('WORKING_DIR', working_dir) # noqa: T201
if procID == 0:
- if not os.path.exists(working_dir):
- os.mkdir(working_dir)
+ if not os.path.exists(working_dir): # noqa: PTH110
+ os.mkdir(working_dir) # noqa: PTH102
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
# initialize log file
@@ -143,31 +143,31 @@ def main(
# for the rWHALE workflow
#
- randomVariables = []
- if 'randomVariables' in inputs.keys():
- randomVariables = inputs['randomVariables']
+ randomVariables = [] # noqa: N806
+ if 'randomVariables' in inputs.keys(): # noqa: SIM118
+ randomVariables = inputs['randomVariables'] # noqa: N806
- inputApplications = inputs['Applications']
- regionalApplication = inputApplications['RegionalEvent']
- appData = regionalApplication['ApplicationData']
- regionalData = inputs['RegionalEvent']
+ inputApplications = inputs['Applications'] # noqa: N806
+ regionalApplication = inputApplications['RegionalEvent'] # noqa: N806
+ appData = regionalApplication['ApplicationData'] # noqa: N806
+ regionalData = inputs['RegionalEvent'] # noqa: N806
regionalData['eventFile'] = (
appData['inputEventFilePath'] + '/' + appData['inputEventFile']
)
regionalData['eventFilePath'] = appData['inputEventFilePath']
- siteFilter = appData['filter']
+ siteFilter = appData['filter'] # noqa: N806
# KZ: 10/19/2022, adding new attributes for the refactored whale
- remoteAppDir = inputs.get('remoteAppDir', '')
- localAppDir = inputs.get('localAppDir', '')
+ remoteAppDir = inputs.get('remoteAppDir', '') # noqa: N806
+ localAppDir = inputs.get('localAppDir', '') # noqa: N806
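+ # if only one of the two app dirs is given, fall back to the other so both are set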
if localAppDir == '':
- localAppDir = remoteAppDir
+ localAppDir = remoteAppDir # noqa: N806
if remoteAppDir == '':
- remoteAppDir = localAppDir
+ remoteAppDir = localAppDir # noqa: N806
- siteResponseInput = {
+ siteResponseInput = { # noqa: N806
'units': inputs['units'],
'outputs': {
'IM': True,
@@ -207,7 +207,7 @@ def main(
}
],
},
- 'UQ': inputs.get('UQ', dict()),
+ 'UQ': inputs.get('UQ', dict()), # noqa: C408
'localAppDir': localAppDir,
'remoteAppDir': remoteAppDir,
'runType': inputs.get('runType', ''),
@@ -228,16 +228,16 @@ def main(
# siteResponseInputFile = 'tmpSiteResponseInput.json'
# siteResponseInputFile = os.path.join(os.path.dirname(input_file),'tmpSiteResponseInput.json')
# KZ: 10/19/2022, fixing the json file path
- siteResponseInputFile = os.path.join(
- os.path.dirname(reference_dir),
+ siteResponseInputFile = os.path.join( # noqa: PTH118, N806
+ os.path.dirname(reference_dir), # noqa: PTH120
'tmpSiteResponseInput.json',
)
if procID == 0:
- with open(siteResponseInputFile, 'w') as json_file:
+ with open(siteResponseInputFile, 'w') as json_file: # noqa: PTH123
json_file.write(json.dumps(siteResponseInput, indent=2))
- WF = whale.Workflow(
+ WF = whale.Workflow( # noqa: N806
run_type,
siteResponseInputFile,
app_registry,
@@ -251,7 +251,7 @@ def main(
)
if bldg_id_filter is not None:
- print(bldg_id_filter)
+ print(bldg_id_filter) # noqa: T201
log_msg(f'Overriding simulation scope; running buildings {bldg_id_filter}')
# If a Min or Max attribute is used when calling the script, we need to
@@ -265,38 +265,38 @@ def main(
# prepare the basic inputs for individual buildings
asset_files = WF.create_asset_files()
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
asset_files = WF.augment_asset_files()
if procID == 0:
- for asset_type, assetIt in asset_files.items():
+ for asset_type, assetIt in asset_files.items(): # noqa: N806
# perform the regional mapping
# WF.perform_regional_mapping(assetIt)
# KZ: 10/19/2022, adding the required argument for the new whale
- print('0 STARTING MAPPING')
+ print('0 STARTING MAPPING') # noqa: T201
# FMK _ PARALLEL WF.perform_regional_mapping(assetIt, asset_type, False)
# WF.perform_regional_mapping(assetIt, asset_type)
- WF.perform_regional_mapping(assetIt, asset_type, False)
+ WF.perform_regional_mapping(assetIt, asset_type, False) # noqa: FBT003
# get all other processes to wait till we are here
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
- print('BARRIER AFTER PERFORM REGIONAL MAPPING')
+ print('BARRIER AFTER PERFORM REGIONAL MAPPING') # noqa: T201
count = 0
- for asset_type, assetIt in asset_files.items():
- # TODO: not elegant code, fix later
- with open(assetIt, encoding='utf-8') as f:
+ for asset_type, assetIt in asset_files.items(): # noqa: N806
+ # TODO: not elegant code, fix later # noqa: TD002
+ with open(assetIt, encoding='utf-8') as f: # noqa: PTH123
asst_data = json.load(f)
# The preprocess app sequence (previously get_RV)
preprocess_app_sequence = ['Event', 'EDP']
# The workflow app sequence
- WF_app_sequence = ['Assets', 'Event', 'EDP']
+ WF_app_sequence = ['Assets', 'Event', 'EDP'] # noqa: N806
# For each asset
for asst in asst_data:
@@ -307,7 +307,7 @@ def main(
log_div()
# Run sWhale
- print('COUNT: ', count, ' ID: ', procID)
+ print('COUNT: ', count, ' ID: ', procID) # noqa: T201
runSWhale(
inputs=None,
@@ -323,12 +323,12 @@ def main(
count = count + 1
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
if procID == 0:
createFilesForEventGrid(
- os.path.join(working_dir, 'Buildings'),
+ os.path.join(working_dir, 'Buildings'), # noqa: PTH118
output_dir,
force_cleanup,
)
@@ -338,7 +338,7 @@ def main(
# KZ: 10/19/2022, chaining bldg_data to asst_data
WF.aggregate_results(asst_data=asst_data)
- if doParallel == True:
+ if doParallel == True: # noqa: E712
comm.Barrier()
# clean up intermediate files from the working directory
@@ -352,32 +352,32 @@ def main(
if __name__ == '__main__':
- pwd1 = os.getcwd()
- if os.path.basename(pwd1) == 'Results':
+ pwd1 = os.getcwd() # noqa: PTH109
+ if os.path.basename(pwd1) == 'Results': # noqa: PTH119
os.chdir('..')
#
# a little bit of preprocessing
#
- thisScriptPath = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
- registryFile = thisScriptPath / 'WorkflowApplications.json'
- applicationDir = Path(thisScriptPath).parents[1]
- pwd = os.getcwd()
- currentDir = Path(pwd)
- referenceDir = currentDir / 'input_data'
- siteResponseOutputDir = referenceDir / 'siteResponseWorkingDir'
- siteResponseAggregatedResultsDir = referenceDir / 'siteResponseOutputMotions'
-
- print('PWD: ', pwd)
- print('currentDir: ', currentDir)
- print('referenceDir: ', referenceDir)
- print('siteResponseOutputDir: ', siteResponseOutputDir)
+ thisScriptPath = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120, N816
+ registryFile = thisScriptPath / 'WorkflowApplications.json' # noqa: N816
+ applicationDir = Path(thisScriptPath).parents[1] # noqa: N816
+ pwd = os.getcwd() # noqa: PTH109
+ currentDir = Path(pwd) # noqa: N816
+ referenceDir = currentDir / 'input_data' # noqa: N816
+ siteResponseOutputDir = referenceDir / 'siteResponseWorkingDir' # noqa: N816
+ siteResponseAggregatedResultsDir = referenceDir / 'siteResponseOutputMotions' # noqa: N816
+
+ print('PWD: ', pwd) # noqa: T201
+ print('currentDir: ', currentDir) # noqa: T201
+ print('referenceDir: ', referenceDir) # noqa: T201
+ print('siteResponseOutputDir: ', siteResponseOutputDir) # noqa: T201
#
# parse command line
#
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the NHERI SimCenter rWHALE workflow for a set of assets.',
allow_abbrev=False,
)
@@ -461,11 +461,11 @@ def main(
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
if wfArgs.appDir is None:
- workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+ workflow_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
wfArgs.appDir = workflow_dir.parents[1]
if wfArgs.check:
@@ -478,8 +478,8 @@ def main(
#
# Calling the main workflow method and passing the parsed arguments
#
- print('FMK siteResponse main: WORKDIR: ', wfArgs.workDir)
- numPROC = int(wfArgs.numP)
+ print('FMK siteResponse main: WORKDIR: ', wfArgs.workDir) # noqa: T201
+ numPROC = int(wfArgs.numP) # noqa: N816
main(
run_type=run_type,
diff --git a/modules/Workflow/whale/__init__.py b/modules/Workflow/whale/__init__.py
index b65e9a13f..e84de852c 100644
--- a/modules/Workflow/whale/__init__.py
+++ b/modules/Workflow/whale/__init__.py
@@ -1,4 +1,4 @@
-#
+# # noqa: D104
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
diff --git a/modules/Workflow/whale/main.py b/modules/Workflow/whale/main.py
index f7927c9a9..5b616b959 100644
--- a/modules/Workflow/whale/main.py
+++ b/modules/Workflow/whale/main.py
@@ -51,7 +51,7 @@
...
-"""
+""" # noqa: D404
import argparse
import importlib
@@ -81,23 +81,23 @@
pp = pprint.PrettyPrinter(indent=4)
# get the absolute path of the whale directory
-whale_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+whale_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
-def str2bool(v):
+def str2bool(v): # noqa: D103
# courtesy of Maxim @ stackoverflow
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 'True', 't', 'y', '1'):
return True
- elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'):
+ elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'): # noqa: RET505
return False
else:
- raise argparse.ArgumentTypeError('Boolean value expected.')
+ raise argparse.ArgumentTypeError('Boolean value expected.') # noqa: EM101, TRY003
-class Options:
+class Options: # noqa: D101
def __init__(self):
self._log_show_ms = False
self._print_log = False
@@ -105,7 +105,7 @@ def __init__(self):
self.reset_log_strings()
@property
- def log_show_ms(self):
+ def log_show_ms(self): # noqa: D102
return self._log_show_ms
@log_show_ms.setter
@@ -115,19 +115,19 @@ def log_show_ms(self, value):
self.reset_log_strings()
@property
- def log_pref(self):
+ def log_pref(self): # noqa: D102
return self._log_pref
@property
- def log_div(self):
+ def log_div(self): # noqa: D102
return self._log_div
@property
- def log_time_format(self):
+ def log_time_format(self): # noqa: D102
return self._log_time_format
@property
- def log_file(self):
+ def log_file(self): # noqa: D102
return globals()['log_file']
@log_file.setter
@@ -141,24 +141,24 @@ def log_file(self, value):
try:
globals()['log_file'] = str(filepath)
- with open(filepath, 'w', encoding='utf-8') as f:
+ with open(filepath, 'w', encoding='utf-8') as f: # noqa: PTH123
f.write('')
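# opening the file for writing truncates any previous log and doubles
# as a validity check on the supplied path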
- except:
- raise ValueError(
- f'The filepath provided does not point to a '
+ except: # noqa: E722
+ raise ValueError( # noqa: B904, TRY003
+ f'The filepath provided does not point to a ' # noqa: EM102
f'valid location: {filepath}'
)
@property
- def print_log(self):
+ def print_log(self): # noqa: D102
return self._print_log
@print_log.setter
def print_log(self, value):
self._print_log = str2bool(value)
- def reset_log_strings(self):
+ def reset_log_strings(self): # noqa: D102
if self._log_show_ms:
self._log_time_format = '%H:%M:%S:%f'
self._log_pref = (
@@ -178,7 +178,7 @@ def reset_log_strings(self):
log_file = None
-def set_options(config_options):
+def set_options(config_options): # noqa: D103
if config_options is not None:
for key, value in config_options.items():
if key == 'LogShowMS':
@@ -190,20 +190,20 @@ def set_options(config_options):
# Monkeypatch warnings to get prettier messages
-def _warning(message, category, filename, lineno, file=None, line=None):
+def _warning(message, category, filename, lineno, file=None, line=None): # noqa: ARG001
if '\\' in filename:
file_path = filename.split('\\')
elif '/' in filename:
file_path = filename.split('/')
python_file = '/'.join(file_path[-3:])
- print(f'WARNING in {python_file} at line {lineno}\n{message}\n')
+ print(f'WARNING in {python_file} at line {lineno}\n{message}\n') # noqa: T201
warnings.showwarning = _warning
-def log_div(prepend_timestamp=False, prepend_blank_space=True):
- """Print a divider line to the log file"""
+def log_div(prepend_timestamp=False, prepend_blank_space=True): # noqa: FBT002
+ """Print a divider line to the log file""" # noqa: D400
if prepend_timestamp or prepend_blank_space:
msg = options.log_div
@@ -217,7 +217,7 @@ def log_div(prepend_timestamp=False, prepend_blank_space=True):
)
-def log_msg(msg='', prepend_timestamp=True, prepend_blank_space=True):
+def log_msg(msg='', prepend_timestamp=True, prepend_blank_space=True): # noqa: FBT002
"""Print a message to the screen with the current time as prefix
The time is in ISO-8601 format, e.g. 2018-06-16T20:24:04Z
@@ -227,13 +227,13 @@ def log_msg(msg='', prepend_timestamp=True, prepend_blank_space=True):
msg: string
Message to print.
- """
+ """ # noqa: D400
msg_lines = msg.split('\n')
for msg_i, msg_line in enumerate(msg_lines):
if prepend_timestamp and (msg_i == 0):
formatted_msg = (
- f'{datetime.now().strftime(options.log_time_format)} {msg_line}'
+ f'{datetime.now().strftime(options.log_time_format)} {msg_line}' # noqa: DTZ005
)
elif prepend_timestamp or prepend_blank_space:
formatted_msg = options.log_pref + msg_line
@@ -241,10 +241,10 @@ def log_msg(msg='', prepend_timestamp=True, prepend_blank_space=True):
formatted_msg = msg_line
if options.print_log:
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
if globals()['log_file'] is not None:
- with open(globals()['log_file'], 'a', encoding='utf-8') as f:
+ with open(globals()['log_file'], 'a', encoding='utf-8') as f: # noqa: PTH123
f.write('\n' + formatted_msg)
@@ -256,20 +256,20 @@ def log_error(msg):
msg: string
Message to print.
- """
+ """ # noqa: D400
log_div()
log_msg('' * (80 - 21 - 6) + ' ERROR')
log_msg(msg)
log_div()
-def print_system_info():
+def print_system_info(): # noqa: D103
log_msg(
'System Information:', prepend_timestamp=False, prepend_blank_space=False
)
log_msg(
f' local time zone: {datetime.utcnow().astimezone().tzinfo}\n'
- f' start time: {datetime.now().strftime("%Y-%m-%dT%H:%M:%S")}\n'
+ f' start time: {datetime.now().strftime("%Y-%m-%dT%H:%M:%S")}\n' # noqa: DTZ005
f' python: {sys.version}\n'
f' numpy: {np.__version__}\n'
f' pandas: {pd.__version__}\n',
@@ -288,7 +288,7 @@ def create_command(command_list, enforced_python=None):
command_list: array of unicode strings
Explain...
- """
+ """ # noqa: D400
if command_list[0] == 'python':
# replace python with...
if enforced_python is None:
@@ -323,12 +323,12 @@ def run_command(command):
command_list: array of unicode strings
Explain...
- """
+ """ # noqa: D400
# If it is a python script, we do not run it, but rather import the main
# function. This ensures that the script is run using the same python
# interpreter that this script uses and it is also faster because we do not
# need to run multiple python interpreters simultaneously.
- Frank_trusts_this_approach = False
+ Frank_trusts_this_approach = False # noqa: N806
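+ # the in-process import shortcut is disabled; commands are always run
+ # in a subprocess below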
if command[:6] == 'python' and Frank_trusts_this_approach:
import importlib # only import this when it's needed
@@ -359,12 +359,12 @@ def run_command(command):
return '', ''
- else:
+ else: # noqa: RET505
# fmk: Shell=True does not work on older Windows machines, so turn the quoted command into a list instead
command = shlex.split(command)
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S603
command, stderr=subprocess.STDOUT, text=True
)
returncode = 0
@@ -384,16 +384,16 @@ def run_command(command):
return result, returncode
-def show_warning(warning_msg):
- warnings.warn(UserWarning(warning_msg))
+def show_warning(warning_msg): # noqa: D103
+ warnings.warn(UserWarning(warning_msg)) # noqa: B028
-def resolve_path(target_path, ref_path):
+def resolve_path(target_path, ref_path): # noqa: D103
ref_path = Path(ref_path)
target_path = str(target_path).strip()
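# leading path separators are stripped so the target is resolved
# relative to ref_path rather than the filesystem root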
- while target_path.startswith('/') or target_path.startswith('\\'):
+ while target_path.startswith('/') or target_path.startswith('\\'): # noqa: PIE810
target_path = target_path[1:]
if target_path == '':
@@ -410,12 +410,12 @@ def resolve_path(target_path, ref_path):
else:
# raise ValueError(
# f"{target_path} does not point to a valid location")
- print(f'{target_path} does not point to a valid location')
+ print(f'{target_path} does not point to a valid location') # noqa: T201
return target_path
-def _parse_app_registry(registry_path, app_types, list_available_apps=False):
+def _parse_app_registry(registry_path, app_types, list_available_apps=False): # noqa: FBT002
"""Load the information about available workflow applications.
Parameters
@@ -447,12 +447,12 @@ def _parse_app_registry(registry_path, app_types, list_available_apps=False):
# open the registry file
log_msg('Loading the json file...', prepend_timestamp=False)
- with open(registry_path, encoding='utf-8') as f:
+ with open(registry_path, encoding='utf-8') as f: # noqa: PTH123
app_registry_data = json.load(f)
log_msg(' OK', prepend_timestamp=False)
# initialize the app registry
- app_registry = dict([(a, dict()) for a in app_types])
+ app_registry = dict([(a, dict()) for a in app_types]) # noqa: C404, C408
log_msg('Loading default values...', prepend_timestamp=False)
@@ -486,7 +486,7 @@ def _parse_app_registry(registry_path, app_types, list_available_apps=False):
log_msg('Available applications:', prepend_timestamp=False)
for app_type, app_list in app_registry.items():
- for app_name, app_object in app_list.items():
+ for app_name, app_object in app_list.items(): # noqa: B007, PERF102
log_msg(f' {app_type} : {app_name}', prepend_timestamp=False)
# pp.pprint(self.app_registry)
@@ -497,11 +497,11 @@ def _parse_app_registry(registry_path, app_types, list_available_apps=False):
return app_registry, default_values
-class WorkFlowInputError(Exception):
+class WorkFlowInputError(Exception): # noqa: D101
def __init__(self, value):
self.value = value
- def __str__(self):
+ def __str__(self): # noqa: D105
return repr(self.value)
@@ -513,7 +513,7 @@ class WorkflowApplication:
Parameters
----------
- """
+ """ # noqa: D414
def __init__(self, app_type, app_info, api_info):
# print('APP_TYPE', app_type)
@@ -525,7 +525,7 @@ def __init__(self, app_type, app_info, api_info):
self.app_type = app_type
self.rel_path = app_info['ExecutablePath']
- if 'RunsParallel' in app_info.keys():
+ if 'RunsParallel' in app_info.keys(): # noqa: SIM118
self.runsParallel = app_info['RunsParallel']
else:
self.runsParallel = False
@@ -534,7 +534,7 @@ def __init__(self, app_type, app_info, api_info):
self.inputs = api_info['Inputs']
self.outputs = api_info['Outputs']
- if 'DefaultValues' in api_info.keys():
+ if 'DefaultValues' in api_info.keys(): # noqa: SIM118
self.defaults = api_info['DefaultValues']
else:
self.defaults = None
@@ -547,11 +547,11 @@ def set_pref(self, preferences, ref_path):
preferences: dictionary
Explain...
- """
+ """ # noqa: D400
self.pref = preferences
# parse the relative paths (if any)
- ASI = [inp['id'] for inp in self.app_spec_inputs]
+ ASI = [inp['id'] for inp in self.app_spec_inputs] # noqa: N806
for preference in list(self.pref.keys()):
if preference in ASI:
input_id = np.where([preference == asi for asi in ASI])[0][0]
@@ -565,7 +565,7 @@ def set_pref(self, preferences, ref_path):
self.pref[preference], ref_path
)
- def get_command_list(self, app_path, force_posix=False):
+ def get_command_list(self, app_path, force_posix=False): # noqa: FBT002, C901
"""Short description
Parameters
@@ -573,7 +573,7 @@ def get_command_list(self, app_path, force_posix=False):
app_path: Path
Explain...
- """
+ """ # noqa: D400
abs_path = Path(app_path) / self.rel_path
# abs_path = posixpath.join(app_path, self.rel_path)
@@ -598,7 +598,7 @@ def get_command_list(self, app_path, force_posix=False):
# If the user also provided an input, let them know that their
# input is invalid
- if in_arg['id'] in self.pref.keys():
+ if in_arg['id'] in self.pref.keys(): # noqa: SIM118
log_msg(
'\nWARNING: Application specific parameters cannot '
'overwrite default workflow\nparameters. See the '
@@ -608,7 +608,7 @@ def get_command_list(self, app_path, force_posix=False):
prepend_blank_space=False,
)
- elif in_arg['id'] in self.pref.keys():
+ elif in_arg['id'] in self.pref.keys(): # noqa: SIM118
arg_value = self.pref[in_arg['id']]
else:
@@ -632,7 +632,7 @@ def get_command_list(self, app_path, force_posix=False):
# If the user also provided an input, let them know that
# their input is invalid
- if out_arg['id'] in self.pref.keys():
+ if out_arg['id'] in self.pref.keys(): # noqa: SIM118
log_msg(
'\nWARNING: Application specific parameters '
'cannot overwrite default workflow\nparameters. '
@@ -642,7 +642,7 @@ def get_command_list(self, app_path, force_posix=False):
prepend_blank_space=False,
)
- elif out_arg['id'] in self.pref.keys():
+ elif out_arg['id'] in self.pref.keys(): # noqa: SIM118
arg_value = self.pref[out_arg['id']]
else:
@@ -653,7 +653,7 @@ def get_command_list(self, app_path, force_posix=False):
else:
arg_list.append(f'{arg_value}')
- ASI_list = [inp['id'] for inp in self.app_spec_inputs]
+ ASI_list = [inp['id'] for inp in self.app_spec_inputs] # noqa: N806
for pref_name, pref_value in self.pref.items():
# only pass those input arguments that are in the registry
if pref_name in ASI_list:
@@ -684,7 +684,7 @@ class Workflow:
app_registry: string
Explain...
- """
+ """ # noqa: D205
def __init__(
self,
@@ -695,9 +695,9 @@ def __init__(
reference_dir=None,
working_dir=None,
app_dir=None,
- parType='seqRUN',
- mpiExec='mpiExec',
- numProc=8,
+ parType='seqRUN', # noqa: N803
+ mpiExec='mpiExec', # noqa: N803
+ numProc=8, # noqa: N803
):
log_msg('Inputs provided:')
log_msg(f'workflow input file: {input_file}', prepend_timestamp=False)
@@ -716,7 +716,7 @@ def __init__(
'WaterDistributionNetwork',
'TransportationNetwork',
]
- self.asset_registry = dict([(a, dict()) for a in self.asset_type_list])
+ self.asset_registry = dict([(a, dict()) for a in self.asset_type_list]) # noqa: C404, C408
self.run_type = run_type
self.input_file = input_file
@@ -727,13 +727,13 @@ def __init__(
self.numProc = numProc
# if parallel setup, open script file to run
- self.inputFilePath = os.path.dirname(input_file)
- parCommandFileName = os.path.join(self.inputFilePath, 'sc_parScript.sh')
+ self.inputFilePath = os.path.dirname(input_file) # noqa: PTH120
+ parCommandFileName = os.path.join(self.inputFilePath, 'sc_parScript.sh') # noqa: PTH118, N806
if parType == 'parSETUP':
- self.parCommandFile = open(parCommandFileName, 'w')
+ self.parCommandFile = open(parCommandFileName, 'w') # noqa: SIM115, PTH123
self.parCommandFile.write('#!/bin/sh' + '\n')
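# in parSETUP mode the workflow collects commands into this shell
# script, to be executed later under MPI instead of running them here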
- print(
+ print( # noqa: T201
'WF: parType, mpiExec, numProc: ',
self.parType,
self.mpiExec,
@@ -751,14 +751,14 @@ def __init__(
self.comm = MPI.COMM_WORLD
self.numP = self.comm.Get_size()
self.procID = self.comm.Get_rank()
- if self.numP < 2:
+ if self.numP < 2: # noqa: PLR2004
self.doParallel = False
self.numP = 1
self.procID = 0
else:
self.doParallel = True
- print(
+ print( # noqa: T201
'WF: parType, mpiExec, numProc, do? numP, procID: ',
self.parType,
self.mpiExec,
@@ -798,10 +798,10 @@ def __init__(
self.workflow_assets = {}
self._parse_inputs()
- def __del__(self):
+ def __del__(self): # noqa: D105
# if parallel setup, add command to run this script with parallel option
if self.parType == 'parSETUP':
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N806
length = len(inputArgs)
i = 0
while i < length:
@@ -820,7 +820,7 @@ def __del__(self):
)
self.parCommandFile.close()
- def _register_app_type(self, app_type, app_dict, sub_app=''):
+ def _register_app_type(self, app_type, app_dict, sub_app=''): # noqa: C901
"""Function to register the applications provided in the input file into
memory, i.e., the 'App registry'
@@ -830,18 +830,18 @@ def _register_app_type(self, app_type, app_dict, sub_app=''):
app_dict - dictionary containing app data
- """
+ """ # noqa: D205, D400, D401
if type(app_dict) is not dict:
return
- else:
- for itmKey, itm in app_dict.items():
+ else: # noqa: RET505
+ for itmKey, itm in app_dict.items(): # noqa: N806
self._register_app_type(app_type, itm, itmKey)
# The provided application
app_in = app_dict.get('Application')
# Check to ensure the applications key is provided in the input
- if app_in == None:
+ if app_in == None: # noqa: E711
return
err = "Need to provide the 'Application' key in " + app_type
raise WorkFlowInputError(err)
@@ -849,26 +849,26 @@ def _register_app_type(self, app_type, app_dict, sub_app=''):
# Check to see if the app type is in the application registry
app_type_obj = self.app_registry.get(app_type)
- if app_in == None:
+ if app_in == None: # noqa: E711
return
if app_in == 'None':
return
- if app_type_obj == None:
+ if app_type_obj == None: # noqa: E711
err = 'The application ' + app_type + ' is not found in the app registry'
raise WorkFlowInputError(err)
# Finally check to see if the app registry contains the provided application
- if app_type_obj.get(app_in) == None:
+ if app_type_obj.get(app_in) == None: # noqa: E711
err = (
'Could not find the provided application in the internal app registry, app name: '
+ app_in
)
- print('Error', app_in)
+ print('Error', app_in) # noqa: T201
raise WorkFlowInputError(err)
- appData = app_dict['ApplicationData']
+ appData = app_dict['ApplicationData'] # noqa: N806
#
# for itmKey, itm in appData.items() :
# self._register_app_type(app_type,itm,itmKey)
@@ -878,7 +878,7 @@ def _register_app_type(self, app_type, app_dict, sub_app=''):
# Check if the app object was created successfully
if app_object is None:
- raise WorkFlowInputError(f'Application deep copy failed for {app_type}')
+ raise WorkFlowInputError(f'Application deep copy failed for {app_type}') # noqa: EM102, TRY003
# only assign the app to the workflow if it has an executable
if app_object.rel_path is None:
@@ -916,7 +916,7 @@ def _register_asset(self, asset_type, asset_dict):
app_dict - dictionary containing asset data
- """
+ """ # noqa: D400, D401
# Check to see if the app type is in the application registry
asset_object = self.asset_registry.get(asset_type)
@@ -934,13 +934,13 @@ def _register_asset(self, asset_type, asset_dict):
log_msg(f'Found asset: {asset_type} ', prepend_timestamp=False)
- def _parse_inputs(self):
- """Load the information about the workflow to run"""
+ def _parse_inputs(self): # noqa: C901
+ """Load the information about the workflow to run""" # noqa: D400
log_msg('Parsing workflow input file')
# open input file
log_msg('Loading the json file...', prepend_timestamp=False)
- with open(self.input_file, encoding='utf-8') as f:
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
log_msg(' OK', prepend_timestamp=False)
@@ -991,7 +991,7 @@ def _parse_inputs(self):
default_values = {}
# workflow input is input file
- default_values['workflowInput'] = os.path.basename(self.input_file)
+ default_values['workflowInput'] = os.path.basename(self.input_file) # noqa: PTH119
if default_values is not None:
log_msg(
'The following workflow defaults were overwritten:',
@@ -999,7 +999,7 @@ def _parse_inputs(self):
)
for key, value in default_values.items():
- if key in self.default_values.keys():
+ if key in self.default_values.keys(): # noqa: SIM118
self.default_values[key] = value
else:
@@ -1013,7 +1013,7 @@ def _parse_inputs(self):
'RegionalEvent',
]:
value = input_data.get(shared_key, None)
- if value != None:
+ if value != None: # noqa: E711
self.shared_data.update({shared_key: value})
# parse the location of the run_dir
@@ -1074,21 +1074,21 @@ def _parse_inputs(self):
if 'Applications' in input_data:
requested_apps = input_data['Applications']
else:
- raise WorkFlowInputError('Need an Applications entry in the input file')
+ raise WorkFlowInputError('Need an Applications entry in the input file') # noqa: EM101, TRY003
# create the requested applications
# Events are special because they are in an array
if 'Events' in requested_apps:
if len(requested_apps['Events']) > 1:
- raise WorkFlowInputError(
- 'Currently, WHALE only supports a single event.'
+ raise WorkFlowInputError( # noqa: TRY003
+ 'Currently, WHALE only supports a single event.' # noqa: EM101
)
for event in requested_apps['Events'][
:1
]: # this limitation can be relaxed in the future
if 'EventClassification' in event:
- eventClassification = event['EventClassification']
+ eventClassification = event['EventClassification'] # noqa: N806
if eventClassification in [
'Earthquake',
'Wind',
@@ -1105,7 +1105,7 @@ def _parse_inputs(self):
if app_object is None:
raise WorkFlowInputError(
- 'Application entry missing for {}'.format('Events')
+ 'Application entry missing for {}'.format('Events') # noqa: EM103
)
app_object.set_pref(
@@ -1114,26 +1114,26 @@ def _parse_inputs(self):
self.workflow_apps['Event'] = app_object
else:
- raise WorkFlowInputError(
- 'Currently, only earthquake and wind events are supported. '
+ raise WorkFlowInputError( # noqa: TRY003
+ 'Currently, only earthquake and wind events are supported. ' # noqa: EM102
f'EventClassification must be Earthquake, not {eventClassification}'
)
else:
- raise WorkFlowInputError('Need Event Classification')
+ raise WorkFlowInputError('Need Event Classification') # noqa: EM101, TRY003
# Figure out what types of assets are coming into the analysis
- assetObjs = requested_apps.get('Assets', None)
+ assetObjs = requested_apps.get('Assets', None) # noqa: N806
# Check if an asset object exists
- if assetObjs != None:
+ if assetObjs != None: # noqa: E711
# raise WorkFlowInputError('Need to define the assets for analysis')
# Check if asset list is not empty
if len(assetObjs) == 0:
- raise WorkFlowInputError('The provided asset object is empty')
+ raise WorkFlowInputError('The provided asset object is empty') # noqa: EM101, TRY003
# Iterate through the asset objects
- for assetObj in assetObjs:
+ for assetObj in assetObjs: # noqa: N806
self._register_asset(assetObj, assetObjs[assetObj])
# Iterate through the app type list which is set when you instantiate the workflow
@@ -1153,7 +1153,7 @@ def _parse_inputs(self):
):
self.app_type_list.remove(app_type)
- def recursiveLog(app_type, app_object):
+ def recursiveLog(app_type, app_object): # noqa: N802
if type(app_object) is dict:
for sub_app_type, sub_object in app_object.items():
log_msg(f' {app_type} : ', prepend_timestamp=False)
@@ -1180,32 +1180,32 @@ def create_asset_files(self):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Creating files for individual assets')
# Open the input file - we'll need it later
- with open(self.input_file, encoding='utf-8') as f:
- input_data = json.load(f)
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
+ input_data = json.load(f) # noqa: F841
# Get the workflow assets
- assetsWfapps = self.workflow_apps.get('Assets', None)
- assetWfList = self.workflow_assets.keys()
+ assetsWfapps = self.workflow_apps.get('Assets', None) # noqa: N806
+ assetWfList = self.workflow_assets.keys() # noqa: N806, F841
- # TODO: not elegant code, fix later
+ # TODO: not elegant code, fix later # noqa: TD002
os.chdir(self.run_dir)
- assetFilesList = {}
+ assetFilesList = {} # noqa: N806
# Iterate through the asset workflow apps
for asset_type, asset_app in assetsWfapps.items():
asset_folder = posixpath.join(self.run_dir, asset_type)
# Make a new directory for each asset
- os.mkdir(asset_folder)
+ os.mkdir(asset_folder) # noqa: PTH102
asset_file = posixpath.join(asset_folder, asset_type) + '.json'
- assetPrefs = asset_app.pref
+ assetPrefs = asset_app.pref # noqa: N806, F841
# filter assets (if needed)
asset_filter = asset_app.pref.get('filter', None)
@@ -1237,8 +1237,8 @@ def create_asset_files(self):
# The GEOJSON_TO_ASSET application is special because it can be used
# for multiple asset types. "asset_type" needs to be added so the app
# knows which asset_type it's processing.
- if asset_app.name == 'GEOJSON_TO_ASSET' or asset_app.name == 'INP_FILE':
- asset_command_list = asset_command_list + [
+ if asset_app.name == 'GEOJSON_TO_ASSET' or asset_app.name == 'INP_FILE': # noqa: PLR1714
+ asset_command_list = asset_command_list + [ # noqa: RUF005
'--assetType',
asset_type,
'--inputJsonFile',
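RUF005 prefers iterable unpacking over list concatenation. A small sketch with a hypothetical command prefix:

    base_command = ['python', 'GeoJSON_to_ASSET.py']  # hypothetical prefix

    # instead of `base_command + ['--assetType', 'Buildings']`
    asset_command_list = [*base_command, '--assetType', 'Buildings']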
@@ -1261,7 +1261,7 @@ def create_asset_files(self):
'\n# Perform Asset File Creation for type: ' + asset_type + ' \n'
)
- if asset_app.runsParallel == False:
+ if asset_app.runsParallel == False: # noqa: E712
self.parCommandFile.write(command + '\n')
else:
self.parCommandFile.write(
@@ -1289,11 +1289,11 @@ def create_asset_files(self):
# Check if the command was completed successfully
if returncode != 0:
- print(result)
+ print(result) # noqa: T201
raise WorkFlowInputError(
'Failed to create the AIM file for ' + asset_type
)
- else:
+ else: # noqa: RET506
log_msg(
'AIM files created for ' + asset_type + '\n',
prepend_timestamp=False,
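RET506 flags the else after a raise, since the raise already ends that branch. A minimal sketch of the flattened control flow (names hypothetical):

    def report(returncode, result, asset_type):
        if returncode != 0:
            raise RuntimeError(result)
        # no `else` needed: this line is reached only on success
        print(f'AIM files created for {asset_type}')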
@@ -1318,7 +1318,7 @@ def create_asset_files(self):
return assetFilesList
- def augment_asset_files(self):
+ def augment_asset_files(self): # noqa: C901
"""Short description
Longer description
@@ -1326,23 +1326,23 @@ def augment_asset_files(self):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Augmenting files for individual assets for Workflow')
# print('INPUT FILE:', self.input_file)
# Open the input file - we'll need it later
- with open(self.input_file, encoding='utf-8') as f:
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
# Get the workflow assets
- assetsWfapps = self.workflow_apps.get('Assets', None)
- assetWfList = self.workflow_assets.keys()
+ assetsWfapps = self.workflow_apps.get('Assets', None) # noqa: N806
+ assetWfList = self.workflow_assets.keys() # noqa: N806, F841
- # TODO: not elegant code, fix later
+ # TODO: not elegant code, fix later # noqa: TD002
os.chdir(self.run_dir)
- assetFilesList = {}
+ assetFilesList = {} # noqa: N806
# Iterate through the asset workflow apps
for asset_type, asset_app in assetsWfapps.items():
@@ -1350,7 +1350,7 @@ def augment_asset_files(self):
asset_file = posixpath.join(asset_folder, asset_type) + '.json'
- assetPrefs = asset_app.pref
+ assetPrefs = asset_app.pref # noqa: N806, F841
# filter assets (if needed)
asset_filter = asset_app.pref.get('filter', None)
@@ -1380,7 +1380,7 @@ def augment_asset_files(self):
# Append workflow settings to the BIM file
log_msg('Appending additional settings to the AIM files...\n')
- with open(asset_file, encoding='utf-8') as f:
+ with open(asset_file, encoding='utf-8') as f: # noqa: PTH123
asset_data = json.load(f)
# extract the extra information from the input file for this asset type
@@ -1401,7 +1401,7 @@ def augment_asset_files(self):
]
for app_type in apps_of_interest:
# Start with the app data under Applications
- if app_type in input_data['Applications'].keys():
+ if app_type in input_data['Applications'].keys(): # noqa: SIM118
if app_type == 'Events':
# Events are stored in an array, so they require special treatment
app_data_array = input_data['Applications'][app_type]
@@ -1428,7 +1428,7 @@ def augment_asset_files(self):
extra_input['Applications'][app_type] = app_info
# Then, look at the app data in the root of the input json
- if app_type in input_data.keys():
+ if app_type in input_data.keys(): # noqa: SIM118
if app_type == 'Events':
# Events are stored in an array, so they require special treatment
app_data_array = input_data[app_type]
@@ -1449,37 +1449,37 @@ def augment_asset_files(self):
count = 0
for asst in asset_data:
if count % self.numP == self.procID:
- AIM_file = asst['file']
+ AIM_file = asst['file'] # noqa: N806
# Open the AIM file and add the unit information to it
# print(count, self.numP, self.procID, AIM_file)
- with open(AIM_file, encoding='utf-8') as f:
- AIM_data = json.load(f)
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
+ AIM_data = json.load(f) # noqa: N806
- if 'DefaultValues' in input_data.keys():
+ if 'DefaultValues' in input_data.keys(): # noqa: SIM118
AIM_data.update(
{'DefaultValues': input_data['DefaultValues']}
)
- if 'commonFileDir' in input_data.keys():
- commonFileDir = input_data['commonFileDir']
+ if 'commonFileDir' in input_data.keys(): # noqa: SIM118
+ commonFileDir = input_data['commonFileDir'] # noqa: N806
if self.inputFilePath not in commonFileDir:
- commonFileDir = os.path.join(
+ commonFileDir = os.path.join( # noqa: PTH118, N806
self.inputFilePath, input_data['commonFileDir']
)
AIM_data.update({'commonFileDir': commonFileDir})
- if 'remoteAppDir' in input_data.keys():
+ if 'remoteAppDir' in input_data.keys(): # noqa: SIM118
AIM_data.update({'remoteAppDir': input_data['remoteAppDir']})
- if 'localAppDir' in input_data.keys():
+ if 'localAppDir' in input_data.keys(): # noqa: SIM118
AIM_data.update({'localAppDir': input_data['localAppDir']})
- if self.units != None:
+ if self.units != None: # noqa: E711
AIM_data.update({'units': self.units})
- # TODO: remove this after all apps have been updated to use the
+ # TODO: remove this after all apps have been updated to use the # noqa: TD002
# above location to get units
AIM_data['GeneralInformation'].update({'units': self.units})
@@ -1493,7 +1493,7 @@ def augment_asset_files(self):
AIM_data.update(extra_input)
- with open(AIM_file, 'w', encoding='utf-8') as f:
+ with open(AIM_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_data, f, indent=2)
count = count + 1
@@ -1516,8 +1516,8 @@ def perform_system_performance_assessment(self, asset_type):
asset_type: string
Asset type to run perform system assessment of
- """
- if 'SystemPerformance' in self.workflow_apps.keys():
+ """ # noqa: D400
+ if 'SystemPerformance' in self.workflow_apps.keys(): # noqa: SIM118
performance_app = self.workflow_apps['SystemPerformance'][asset_type]
else:
log_msg(
@@ -1527,7 +1527,7 @@ def perform_system_performance_assessment(self, asset_type):
log_div()
return False
- if performance_app.rel_path == None:
+ if performance_app.rel_path == None: # noqa: E711
log_msg(
f'No Performance application to run for asset type: {asset_type}.',
prepend_timestamp=False,
@@ -1622,17 +1622,17 @@ def perform_regional_event(self):
Parameters
----------
- """
+ """ # noqa: D414
log_msg('Simulating regional event...')
- if 'RegionalEvent' in self.workflow_apps.keys():
+ if 'RegionalEvent' in self.workflow_apps.keys(): # noqa: SIM118
reg_event_app = self.workflow_apps['RegionalEvent']
else:
log_msg('No Regional Event Application to run.', prepend_timestamp=False)
log_div()
return
- if reg_event_app.rel_path == None:
+ if reg_event_app.rel_path == None: # noqa: E711
log_msg('No regional Event Application to run.', prepend_timestamp=False)
log_div()
return
@@ -1649,7 +1649,7 @@ def perform_regional_event(self):
)
self.parCommandFile.write('\n# Perform Regional Event Simulation\n')
- if reg_event_app.runsParallel == False:
+ if reg_event_app.runsParallel == False: # noqa: E712
self.parCommandFile.write(command + '\n')
else:
self.parCommandFile.write(
@@ -1677,7 +1677,7 @@ def perform_regional_event(self):
)
log_div()
- def perform_regional_recovery(self, asset_keys):
+ def perform_regional_recovery(self, asset_keys): # noqa: ARG002
"""Run an application to simulate regional recovery
Longer description
@@ -1685,17 +1685,17 @@ def perform_regional_recovery(self, asset_keys):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Simulating Regional Recovery ...')
- if 'Recovery' in self.workflow_apps.keys():
+ if 'Recovery' in self.workflow_apps.keys(): # noqa: SIM118
reg_recovery_app = self.workflow_apps['Recovery']
else:
log_msg('No Recovery Application to run.', prepend_timestamp=False)
log_div()
return
- if reg_recovery_app.rel_path == None:
+ if reg_recovery_app.rel_path == None: # noqa: E711
log_msg('No Recovery Application to run.', prepend_timestamp=False)
log_div()
return
@@ -1712,7 +1712,7 @@ def perform_regional_recovery(self, asset_keys):
)
self.parCommandFile.write('\n# Perform Regional Recovery Simulation\n')
- if reg_recovery_app.runsParallel == False:
+ if reg_recovery_app.runsParallel == False: # noqa: E712
self.parCommandFile.write(command + '\n')
else:
self.parCommandFile.write(
@@ -1740,25 +1740,26 @@ def perform_regional_recovery(self, asset_keys):
)
log_div()
- def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True):
+ def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True): # noqa: FBT002, N803
"""Performs the regional mapping between the asset and a hazard event.
Parameters
----------
- """
+ """ # noqa: D401, D414
log_msg('', prepend_timestamp=False, prepend_blank_space=False)
log_msg('Creating regional mapping...')
reg_mapping_app = self.workflow_apps['RegionalMapping'][assetType]
- # TODO: not elegant code, fix later
+ # TODO: not elegant code, fix later # noqa: TD002
for input_ in reg_mapping_app.inputs:
if input_['id'] == 'assetFile':
input_['default'] = str(AIM_file_path)
-
# Get the event file path
- eventFilePath = self.shared_data.get('RegionalEvent', {}).get('eventFilePath', self.reference_dir)
+ eventFilePath = self.shared_data.get('RegionalEvent', {}).get( # noqa: N806
+ 'eventFilePath', self.reference_dir
+ )
reg_mapping_app.inputs.append(
{
@@ -1788,7 +1789,7 @@ def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True):
'\n# Regional Mapping for asset type: ' + assetType + ' \n'
)
- if reg_mapping_app.runsParallel == False:
+ if reg_mapping_app.runsParallel == False: # noqa: E712
self.parCommandFile.write(command + '\n')
else:
self.parCommandFile.write(
@@ -1831,7 +1832,7 @@ def perform_regional_mapping(self, AIM_file_path, assetType, doParallel=True):
log_div()
- def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'):
+ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: C901, N803
"""Initializes the simulation directory for each asset.
In the current directory where the Asset Information Model (AIM) file resides, e.g., ./Buildings/2000-AIM.json, a new directory is created with the asset id, e.g., ./Buildings/2000, and within that directory a template directory is created (templatedir) ./Buildings/2000/templatedir. The AIM file is copied over to the template dir. It is within this template dir that the analysis is run for the individual asset.
@@ -1841,16 +1842,16 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'):
asst_id - the asset id
AIM_file - file path to the existing AIM file
- """
+ """ # noqa: D401
log_msg('Initializing the simulation directory\n')
- aimDir = os.path.dirname(AIM_file_path)
- aimFileName = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFileName = os.path.basename(AIM_file_path) # noqa: PTH119, N806
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
- aimFileName = AIM_file_path
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
+ aimFileName = AIM_file_path # noqa: N806
os.chdir(aimDir)
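The PTH119/PTH120/PTH110 trio suppressed above maps directly onto pathlib attributes. A minimal sketch with a hypothetical path:

    from pathlib import Path

    aim_path = Path('Results/Buildings/2000-AIM.json')  # hypothetical
    aim_dir = aim_path.parent      # os.path.dirname
    aim_file_name = aim_path.name  # os.path.basename
    if not aim_dir.exists():       # os.path.exists
        aim_dir = Path.cwd()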
@@ -1860,9 +1861,9 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'):
shutil.rmtree(asst_id, ignore_errors=True)
# create the asset_id dir and the template dir
- os.mkdir(asst_id)
+ os.mkdir(asst_id) # noqa: PTH102
os.chdir(asst_id)
- os.mkdir('templatedir')
+ os.mkdir('templatedir') # noqa: PTH102
os.chdir('templatedir')
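Likewise, PTH102 points at Path.mkdir. A short sketch of creating the per-asset template directory with pathlib (asset id hypothetical):

    from pathlib import Path

    asset_dir = Path('Buildings') / '2000'  # hypothetical asset id
    (asset_dir / 'templatedir').mkdir(parents=True, exist_ok=True)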
# Make a copy of the AIM file
@@ -1873,32 +1874,32 @@ def init_simdir(self, asst_id=None, AIM_file_path='AIM.json'):
try:
shutil.copy(src, dst)
- print('Copied AIM file to: ', dst)
+ print('Copied AIM file to: ', dst) # noqa: T201
# os.remove(src)
- except:
- print('Error occurred while copying file: ', dst)
+ except: # noqa: E722
+ print('Error occurred while copying file: ', dst) # noqa: T201
else:
- for dir_or_file in os.listdir(os.getcwd()):
+ for dir_or_file in os.listdir(os.getcwd()): # noqa: PTH109
if dir_or_file not in ['log.txt', 'templatedir', 'input_data']:
- if os.path.isdir(dir_or_file):
+ if os.path.isdir(dir_or_file): # noqa: PTH112
shutil.rmtree(dir_or_file)
else:
- os.remove(dir_or_file)
+ os.remove(dir_or_file) # noqa: PTH107
os.chdir(
'templatedir'
- ) # TODO: we might want to add a generic id dir to be consistent with the regional workflow here
+ ) # TODO: we might want to add a generic id dir to be consistent with the regional workflow here # noqa: TD002
# Remove files with .j extensions that might be there from previous runs
- for file in os.listdir(os.getcwd()):
+ for file in os.listdir(os.getcwd()): # noqa: PTH109
if file.endswith('.j'):
- os.remove(file)
+ os.remove(file) # noqa: PTH107
# Make a copy of the input file and rename it to AIM.json
# This is a temporary fix, will be removed eventually.
- dst = Path(os.getcwd()) / AIM_file_path
+ dst = Path(os.getcwd()) / AIM_file_path # noqa: PTH109
# dst = posixpath.join(os.getcwd(),AIM_file)
if AIM_file_path != self.input_file:
shutil.copy(src=self.input_file, dst=dst)
@@ -1918,7 +1919,7 @@ def cleanup_simdir(self, asst_id):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Cleaning up the simulation directory.')
os.chdir(self.run_dir)
@@ -1926,7 +1927,7 @@ def cleanup_simdir(self, asst_id):
if asst_id is not None:
os.chdir(asst_id)
- workdirs = os.listdir(os.getcwd())
+ workdirs = os.listdir(os.getcwd()) # noqa: PTH109
for workdir in workdirs:
if 'workdir' in workdir:
shutil.rmtree(workdir, ignore_errors=True)
@@ -1944,17 +1945,17 @@ def init_workdir(self):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Initializing the working directory.')
os.chdir(self.run_dir)
- for dir_or_file in os.listdir(os.getcwd()):
+ for dir_or_file in os.listdir(os.getcwd()): # noqa: PTH109
if dir_or_file != 'log.txt':
- if os.path.isdir(dir_or_file):
+ if os.path.isdir(dir_or_file): # noqa: PTH112
shutil.rmtree(dir_or_file)
else:
- os.remove(dir_or_file)
+ os.remove(dir_or_file) # noqa: PTH107
log_msg(
'Working directory successfully initialized.', prepend_timestamp=False
@@ -1969,7 +1970,7 @@ def cleanup_workdir(self):
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Cleaning up the working directory.')
os.chdir(self.run_dir)
@@ -1983,10 +1984,10 @@ def cleanup_workdir(self):
log_msg('Working directory successfully cleaned up.')
log_div()
- def preprocess_inputs(
+ def preprocess_inputs( # noqa: C901
self,
app_sequence,
- AIM_file_path='AIM.json',
+ AIM_file_path='AIM.json', # noqa: N803
asst_id=None,
asset_type=None,
):
@@ -1997,16 +1998,16 @@ def preprocess_inputs(
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Running preprocessing step random variables')
# Get the directory to the asset class dir, e.g., buildings
- aimDir = os.path.dirname(AIM_file_path)
- aimFileName = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFileName = os.path.basename(AIM_file_path) # noqa: PTH119, N806, F841
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
os.chdir(aimDir)
@@ -2018,7 +2019,7 @@ def preprocess_inputs(
for app_type in self.optional_apps:
if (app_type in app_sequence) and (
- app_type not in self.workflow_apps.keys()
+ app_type not in self.workflow_apps.keys() # noqa: SIM118
):
app_sequence.remove(app_type)
@@ -2028,7 +2029,7 @@ def preprocess_inputs(
if app_type != 'FEM':
if AIM_file_path is not None:
if type(workflow_app) is dict:
- for itemKey, item in workflow_app.items():
+ for itemKey, item in workflow_app.items(): # noqa: N806
if asset_type is not None and asset_type != itemKey:
continue
@@ -2198,16 +2199,16 @@ def preprocess_inputs(
)
log_div()
- def gather_workflow_inputs(self, asst_id=None, AIM_file_path='AIM.json'):
+ def gather_workflow_inputs(self, asst_id=None, AIM_file_path='AIM.json'): # noqa: N803, D102
log_msg('Gathering Workflow Inputs.', prepend_timestamp=False)
- if 'UQ' in self.workflow_apps.keys():
+ if 'UQ' in self.workflow_apps.keys(): # noqa: SIM118
# Get the directory to the asset class dir, e.g., buildings
- aimDir = os.path.dirname(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
os.chdir(aimDir)
@@ -2216,7 +2217,7 @@ def gather_workflow_inputs(self, asst_id=None, AIM_file_path='AIM.json'):
os.chdir('templatedir')
- relPathCreateCommon = (
+ relPathCreateCommon = ( # noqa: N806
'applications/performUQ/common/createStandardUQ_Input'
)
abs_path = Path(self.app_dir_local) / relPathCreateCommon
@@ -2226,9 +2227,9 @@ def gather_workflow_inputs(self, asst_id=None, AIM_file_path='AIM.json'):
# arg_list.append(u'{}'.format(abs_path))
# inputFilePath = os.path.dirname(self.input_file)
- inputFilePath = os.getcwd()
- inputFilename = os.path.basename(self.input_file)
- pathToScFile = posixpath.join(inputFilePath, 'sc_' + inputFilename)
+ inputFilePath = os.getcwd() # noqa: PTH109, N806
+ inputFilename = os.path.basename(self.input_file) # noqa: PTH119, N806
+ pathToScFile = posixpath.join(inputFilePath, 'sc_' + inputFilename) # noqa: N806
# arg_list.append(u'{}'.format(self.input_file))
arg_list.append(f'{AIM_file_path}')
@@ -2266,29 +2267,29 @@ def gather_workflow_inputs(self, asst_id=None, AIM_file_path='AIM.json'):
log_msg('Successfully Gathered Inputs.', prepend_timestamp=False)
log_div()
- def create_driver_file(
+ def create_driver_file( # noqa: C901
self,
app_sequence,
asst_id=None,
- AIM_file_path='AIM.json',
+ AIM_file_path='AIM.json', # noqa: N803
):
"""This functipon creates a UQ driver file. This is only done if UQ is in the workflow apps
Parameters
----------
- """
- if 'UQ' in self.workflow_apps.keys():
+ """ # noqa: D400, D401, D404, D414
+ if 'UQ' in self.workflow_apps.keys(): # noqa: SIM118
log_msg('Creating the workflow driver file')
# print('ASSET_ID', asst_id)
# print('AIM_FILE_PATH', AIM_file_path)
- aimDir = os.path.dirname(AIM_file_path)
- aimFile = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFile = os.path.basename(AIM_file_path) # noqa: PTH119, N806, F841
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
os.chdir(aimDir)
@@ -2303,7 +2304,7 @@ def create_driver_file(
for app_type in self.optional_apps:
if (app_type in app_sequence) and (
- app_type not in self.workflow_apps.keys()
+ app_type not in self.workflow_apps.keys() # noqa: SIM118
):
app_sequence.remove(app_type)
@@ -2313,7 +2314,7 @@ def create_driver_file(
# print('FMK runtype', self.run_type)
if self.run_type in ['set_up', 'runningRemote', 'parSETUP']:
if type(workflow_app) is dict:
- for itemKey, item in workflow_app.items():
+ for itemKey, item in workflow_app.items(): # noqa: B007, N806, PERF102
command_list = item.get_command_list(
app_path=self.app_dir_remote, force_posix=True
)
@@ -2334,7 +2335,7 @@ def create_driver_file(
)
elif type(workflow_app) is dict:
- for itemKey, item in workflow_app.items():
+ for itemKey, item in workflow_app.items(): # noqa: B007, N806, PERF102
command_list = item.get_command_list(
app_path=self.app_dir_local
)
@@ -2358,13 +2359,13 @@ def create_driver_file(
# log_msg('Workflow driver script:', prepend_timestamp=False)
# log_msg('\n{}\n'.format(driver_script), prepend_timestamp=False, prepend_blank_space=False)
- driverFile = self.default_values['driverFile']
+ driverFile = self.default_values['driverFile'] # noqa: N806
# KZ: for windows, to write bat
if platform.system() == 'Windows':
- driverFile = driverFile + '.bat'
+ driverFile = driverFile + '.bat' # noqa: N806
log_msg(driverFile)
- with open(driverFile, 'w', newline='\n', encoding='utf-8') as f:
+ with open(driverFile, 'w', newline='\n', encoding='utf-8') as f: # noqa: PTH123
f.write(driver_script)
log_msg(
@@ -2375,7 +2376,7 @@ def create_driver_file(
log_msg('No UQ requested, workflow driver is not needed.')
log_div()
- def simulate_response(self, AIM_file_path='AIM.json', asst_id=None):
+ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None): # noqa: C901, N803
"""Short description
Longer description
@@ -2383,21 +2384,21 @@ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None):
Parameters
----------
- """
+ """ # noqa: D400, D414
# Get the directory to the asset class dir, e.g., buildings
- aimDir = os.path.dirname(AIM_file_path)
- aimFileName = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFileName = os.path.basename(AIM_file_path) # noqa: PTH119, N806, F841
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
os.chdir(aimDir)
if asst_id is not None:
os.chdir(asst_id)
- if 'UQ' in self.workflow_apps.keys():
+ if 'UQ' in self.workflow_apps.keys(): # noqa: SIM118
log_msg('Running response simulation')
os.chdir('templatedir')
@@ -2472,20 +2473,20 @@ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None):
)
# if the DL is coupled with response estimation, we need to sort the results
- DL_app = self.workflow_apps.get('DL', None)
+ DL_app = self.workflow_apps.get('DL', None) # noqa: N806
# FMK
# if asst_id is not None:
# KZ: 10/19/2022, minor patch
if asst_id is not None and DL_app is not None:
- DL_app = DL_app['Buildings']
+ DL_app = DL_app['Buildings'] # noqa: N806
if DL_app is not None:
is_coupled = DL_app.pref.get('coupled_EDP', None)
if is_coupled:
if 'eventID' in dakota_out.columns:
- events = dakota_out['eventID'].values
+ events = dakota_out['eventID'].values # noqa: PD011
events = [int(e.split('x')[-1]) for e in events]
sorter = np.argsort(events)
dakota_out = dakota_out.iloc[sorter, :]
@@ -2495,7 +2496,7 @@ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None):
# log_msg('Response simulation finished successfully.', prepend_timestamp=False)# sy - this message was showing up when quoFEM analysis failed
- except:
+ except: # noqa: E722
log_msg(
'dakotaTab.out not found. Response.csv not created.',
prepend_timestamp=False,
@@ -2517,22 +2518,22 @@ def simulate_response(self, AIM_file_path='AIM.json', asst_id=None):
log_div()
- def perform_asset_performance(asset_type):
- performanceWfapps = self.workflow_apps.get('Performance', None)
+ def perform_asset_performance(asset_type): # noqa: N805, D102
+ performanceWfapps = self.workflow_apps.get('Performance', None) # noqa: N806, F821
performance_app = performanceWfapps[asset_type]
app_command_list = performance_app.get_command_list(
- app_path=self.app_dir_local
+ app_path=self.app_dir_local # noqa: F821
)
command = create_command(app_command_list)
result, returncode = run_command(command)
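The N805 and F821 markers here flag a genuine defect rather than a style issue: the method is missing self, so self.workflow_apps and self.app_dir_local are unresolved names. A hedged sketch of the corrected signature, keeping the module's create_command and run_command helpers as used in the surrounding code:

    def perform_asset_performance(self, asset_type):
        performance_wfapps = self.workflow_apps.get('Performance', None)
        performance_app = performance_wfapps[asset_type]
        app_command_list = performance_app.get_command_list(
            app_path=self.app_dir_local
        )
        command = create_command(app_command_list)
        result, returncode = run_command(command)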
- def estimate_losses(
+ def estimate_losses( # noqa: C901
self,
- AIM_file_path='AIM.json',
+ AIM_file_path='AIM.json', # noqa: N803
asst_id=None,
asset_type=None,
input_file=None,
- copy_resources=False,
+ copy_resources=False, # noqa: FBT002
):
"""Short description
@@ -2541,18 +2542,18 @@ def estimate_losses(
Parameters
----------
- """
- if 'DL' in self.workflow_apps.keys():
+ """ # noqa: D400, D414
+ if 'DL' in self.workflow_apps.keys(): # noqa: SIM118
log_msg('Running damage and loss assessment')
# Get the directory to the asset class dir, e.g., buildings
- aimDir = os.path.dirname(AIM_file_path)
- aimFileName = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFileName = os.path.basename(AIM_file_path) # noqa: PTH119, N806
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
- aimFileName = AIM_file_path
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
+ aimFileName = AIM_file_path # noqa: N806
os.chdir(aimDir)
@@ -2582,7 +2583,7 @@ def estimate_losses(
workflow_app = self.workflow_apps['DL']
if type(workflow_app) is dict:
- for itemKey, item in workflow_app.items():
+ for itemKey, item in workflow_app.items(): # noqa: N806
if AIM_file_path is not None:
item.defaults['filenameDL'] = AIM_file_path
# for input_var in workflow_app.inputs:
@@ -2600,7 +2601,7 @@ def estimate_losses(
command_list.append('--dirnameOutput')
# Only add asset id if we are running a regional assessment
- if asst_id != None:
+ if asst_id != None: # noqa: E711
command_list.append(f'{aimDir}/{asst_id}')
else:
command_list.append(f'{aimDir}')
@@ -2623,7 +2624,7 @@ def estimate_losses(
# if multiple buildings are analyzed, copy the pelicun_log file to the root dir
if 'Assets' in self.app_type_list:
- try:
+ try: # noqa: SIM105
shutil.copy(
src=aimDir / f'{asst_id}/{"pelicun_log.txt"}',
dst=aimDir / f'pelicun_log_{asst_id}.txt',
@@ -2631,7 +2632,7 @@ def estimate_losses(
# src = posixpath.join(self.run_dir, '{}/{}'.format(asst_id, 'pelicun_log.txt')),
# dst = posixpath.join(self.run_dir, 'pelicun_log_{}.txt'.format(asst_id)))
- except:
+ except: # noqa: S110, E722
pass
else:
@@ -2647,7 +2648,7 @@ def estimate_losses(
command_list.append('--dirnameOutput')
# Only add asset id if we are running a regional assessment
- if asst_id != None:
+ if asst_id != None: # noqa: E711
command_list.append(f'{aimDir}/{asst_id}')
else:
command_list.append(f'{aimDir}')
@@ -2674,20 +2675,20 @@ def estimate_losses(
# if multiple buildings are analyzed, copy the pelicun_log file to the root dir
if 'Building' in self.app_type_list:
- try:
+ try: # noqa: SIM105
shutil.copy(
src=self.run_dir / f'{asst_id}/{"pelicun_log.txt"}',
dst=self.run_dir / f'pelicun_log_{asst_id}.txt',
)
# src = posixpath.join(self.run_dir, '{}/{}'.format(asst_id, 'pelicun_log.txt')),
# dst = posixpath.join(self.run_dir, 'pelicun_log_{}.txt'.format(asst_id)))
- except:
+ except: # noqa: S110, E722
pass
# Remove the copied AIM since it is not used anymore
try:
dst = posixpath.join(aimDir, f'{asst_id}/{aimFileName}')
- os.remove(dst)
- except:
+ os.remove(dst) # noqa: PTH107
+ except: # noqa: S110, E722
pass
log_msg(
'Damage and loss assessment finished successfully.',
@@ -2699,8 +2700,8 @@ def estimate_losses(
log_msg('No DL requested, loss assessment step is skipped.')
# Only regional simulations send in a asst id
- if asst_id != None:
- EDP_df = pd.read_csv('response.csv', header=0, index_col=0)
+ if asst_id != None: # noqa: E711
+ EDP_df = pd.read_csv('response.csv', header=0, index_col=0) # noqa: N806
col_info = []
for col in EDP_df.columns:
@@ -2710,18 +2711,18 @@ def estimate_losses(
col_info.append(['dummy', '1', '1'])
continue
split_col = col.split('-')
- if len(split_col[1]) == 3:
+ if len(split_col[1]) == 3: # noqa: PLR2004
col_info.append(split_col[1:])
- except:
+ except: # noqa: S112, E722
continue
col_info = np.transpose(col_info)
- EDP_types = np.unique(col_info[0])
- EDP_locs = np.unique(col_info[1])
- EDP_dirs = np.unique(col_info[2])
+ EDP_types = np.unique(col_info[0]) # noqa: N806
+ EDP_locs = np.unique(col_info[1]) # noqa: N806
+ EDP_dirs = np.unique(col_info[2]) # noqa: N806
- MI = pd.MultiIndex.from_product(
+ MI = pd.MultiIndex.from_product( # noqa: N806
[EDP_types, EDP_locs, EDP_dirs, ['median', 'beta']],
names=['type', 'loc', 'dir', 'stat'],
)
@@ -2753,7 +2754,7 @@ def estimate_losses(
EDP_df[f'1-{col[0]}-{col[1]}-{col[2]}']
).std()
- df_res.dropna(axis=1, how='all', inplace=True)
+ df_res.dropna(axis=1, how='all', inplace=True) # noqa: PD002
df_res = df_res.astype(float)
@@ -2762,15 +2763,15 @@ def estimate_losses(
log_div()
- def estimate_performance(
+ def estimate_performance( # noqa: D102
self,
- AIM_file_path='AIM.json',
+ AIM_file_path='AIM.json', # noqa: N803
asst_id=None,
- asset_type=None,
- input_file=None,
- copy_resources=False,
+ asset_type=None, # noqa: ARG002
+ input_file=None, # noqa: ARG002
+ copy_resources=False, # noqa: FBT002, ARG002
):
- if 'Performance' not in self.workflow_apps.keys():
+ if 'Performance' not in self.workflow_apps.keys(): # noqa: SIM118
log_msg(
'No performance assessment requested, performance assessment step is skipped.'
)
@@ -2780,13 +2781,13 @@ def estimate_performance(
log_msg('Running performance assessment')
# Get the directory to the asset class dir, e.g., buildings
- aimDir = os.path.dirname(AIM_file_path)
- aimFileName = os.path.basename(AIM_file_path)
+ aimDir = os.path.dirname(AIM_file_path) # noqa: PTH120, N806
+ aimFileName = os.path.basename(AIM_file_path) # noqa: PTH119, N806
# If the path is not provided, assume the AIM file is in the run dir
- if os.path.exists(aimDir) == False:
- aimDir = self.run_dir
- aimFileName = AIM_file_path
+ if os.path.exists(aimDir) == False: # noqa: PTH110, E712
+ aimDir = self.run_dir # noqa: N806
+ aimFileName = AIM_file_path # noqa: N806, F841
os.chdir(aimDir)
@@ -2797,7 +2798,7 @@ def estimate_performance(
command_list.append('--dirnameOutput')
# Only add asset id if we are running a regional assessment
- if asst_id != None:
+ if asst_id != None: # noqa: E711
command_list.append(f'{aimDir}/{asst_id}')
else:
command_list.append(f'{aimDir}')
@@ -2818,12 +2819,12 @@ def estimate_performance(
log_msg('Performance assessment finished.', prepend_timestamp=False)
log_div()
- def aggregate_results(
+ def aggregate_results( # noqa: C901, PLR0912, PLR0915
self,
asst_data,
asset_type='',
# out_types = ['IM', 'BIM', 'EDP', 'DM', 'DV', 'every_realization'],
- out_types=['AIM', 'EDP', 'DMG', 'DV', 'every_realization'],
+ out_types=['AIM', 'EDP', 'DMG', 'DV', 'every_realization'], # noqa: B006
headers=None,
):
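B006 warns that a mutable default like the out_types list above is shared across calls. The usual fix, sketched here on a stripped-down signature, defaults to None and builds the list inside the function:

    def aggregate(out_types=None):
        if out_types is None:
            out_types = ['AIM', 'EDP', 'DMG', 'DV', 'every_realization']
        return out_types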
"""Short description
@@ -2833,11 +2834,11 @@ def aggregate_results(
Parameters
----------
- """
+ """ # noqa: D400, D414
log_msg('Collecting ' + asset_type + ' damage and loss results')
- R2D_res_out_types = []
- with open(self.input_file) as f:
+ R2D_res_out_types = [] # noqa: N806
+ with open(self.input_file) as f: # noqa: PTH123
input_data = json.load(f)
requested_output = input_data['outputs']
for key, item in requested_output.items():
@@ -2859,7 +2860,7 @@ def aggregate_results(
) # max_id = int(asst_data[0]['id'])
#
- # TODO: ugly, ugly, I know.
+ # TODO: ugly, ugly, I know. # noqa: TD002
# Only temporary solution while we have both Pelicuns in parallel
# FMK - bug fix adding check on DL, not in siteResponse input file
#
@@ -2870,25 +2871,25 @@ def aggregate_results(
):
initialize_dicts = True
for a_i, asst in enumerate(asst_data):
- bldg_dir = Path(os.path.dirname(asst_data[a_i]['file'])).resolve()
+ bldg_dir = Path(os.path.dirname(asst_data[a_i]['file'])).resolve() # noqa: PTH120
main_dir = bldg_dir
- assetTypeHierarchy = [bldg_dir.name]
+ assetTypeHierarchy = [bldg_dir.name] # noqa: N806
while main_dir.parent.name != 'Results':
main_dir = bldg_dir.parent
- assetTypeHierarchy = [main_dir.name] + assetTypeHierarchy
+ assetTypeHierarchy = [main_dir.name] + assetTypeHierarchy # noqa: N806, RUF005
asset_id = asst['id']
asset_dir = bldg_dir / asset_id
# always get the AIM info
- AIM_file = None
+ AIM_file = None # noqa: N806
if f'{asset_id}-AIM_ap.json' in os.listdir(asset_dir):
- AIM_file = asset_dir / f'{asset_id}-AIM_ap.json'
+ AIM_file = asset_dir / f'{asset_id}-AIM_ap.json' # noqa: N806
elif f'{asset_id}-AIM.json' in os.listdir(asset_dir):
- AIM_file = asset_dir / f'{asset_id}-AIM.json'
+ AIM_file = asset_dir / f'{asset_id}-AIM.json' # noqa: N806
else:
# skip this asset if there is no AIM file available
@@ -2897,8 +2898,8 @@ def aggregate_results(
)
continue
- with open(AIM_file, encoding='utf-8') as f:
- AIM_data_i = json.load(f)
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
+ AIM_data_i = json.load(f) # noqa: N806
sample_size = AIM_data_i['Applications']['DL']['ApplicationData'][
'Realizations'
@@ -2923,20 +2924,20 @@ def aggregate_results(
rlzn_pointer = {
rlz_i: realizations[rlz_i] for rlz_i in range(sample_size)
}
- for assetTypeIter in assetTypeHierarchy:
- if assetTypeIter not in deter_pointer.keys():
+ for assetTypeIter in assetTypeHierarchy: # noqa: N806
+ if assetTypeIter not in deter_pointer.keys(): # noqa: SIM118
deter_pointer.update({assetTypeIter: {}})
deter_pointer = deter_pointer[assetTypeIter]
for rlz_i in range(sample_size):
- if assetTypeIter not in rlzn_pointer[rlz_i].keys():
+ if assetTypeIter not in rlzn_pointer[rlz_i].keys(): # noqa: SIM118
rlzn_pointer[rlz_i].update({assetTypeIter: {}})
rlzn_pointer[rlz_i] = rlzn_pointer[rlz_i][assetTypeIter]
# Currently, all GI data is deterministic
- GI_data_i_det = AIM_data_i['GeneralInformation']
+ GI_data_i_det = AIM_data_i['GeneralInformation'] # noqa: N806
- # TODO: later update this to handle probabilistic GI attributes
- GI_data_i_prob = {}
+ # TODO: later update this to handle probabilistic GI attributes # noqa: TD002
+ GI_data_i_prob = {} # noqa: N806
for rlz_i in range(sample_size):
rlzn_pointer[rlz_i].update(
@@ -2957,7 +2958,7 @@ def aggregate_results(
)
else:
- with open(asset_dir / edp_out_file_i, encoding='utf-8') as f:
+ with open(asset_dir / edp_out_file_i, encoding='utf-8') as f: # noqa: PTH123
edp_data_i = json.load(f)
# remove the ONE demand
@@ -3014,7 +3015,7 @@ def aggregate_results(
)
else:
- with open(asset_dir / dmg_out_file_i, encoding='utf-8') as f:
+ with open(asset_dir / dmg_out_file_i, encoding='utf-8') as f: # noqa: PTH123
dmg_data_i = json.load(f)
# remove damage unit info
@@ -3037,16 +3038,16 @@ def aggregate_results(
dmg_output.update({rlz_i: rlz_output})
# we assume that damage information is condensed
- # TODO: implement condense_ds flag in DL_calc
+ # TODO: implement condense_ds flag in DL_calc # noqa: TD002
for rlz_i in range(sample_size):
rlzn_pointer[rlz_i][asset_id].update(
{'Damage': dmg_output[rlz_i]}
)
if 'DM' in R2D_res_out_types:
# use forward fill in case of multiple modes
- meanValues = dmg_data_i.mode().ffill().mean()
- stdValues = dmg_data_i.std()
- r2d_res_dmg = dict()
+ meanValues = dmg_data_i.mode().ffill().mean() # noqa: N806, F841
+ stdValues = dmg_data_i.std() # noqa: N806, F841
+ r2d_res_dmg = dict() # noqa: C408
# for key in dmg_data_i.columns:
# meanKey = f'R2Dres_mode_{key}'
# stdKey = f'R2Dres_std_{key}'
@@ -3074,7 +3075,7 @@ def aggregate_results(
)
else:
- with open(asset_dir / dv_out_file_i, encoding='utf-8') as f:
+ with open(asset_dir / dv_out_file_i, encoding='utf-8') as f: # noqa: PTH123
dv_data_i = json.load(f)
# extract DV unit info
@@ -3114,7 +3115,7 @@ def aggregate_results(
deter_pointer[asset_id].update({'Loss': {'Units': dv_units}})
if 'DV' in R2D_res_out_types:
- r2d_res_dv = dict()
+ r2d_res_dv = dict() # noqa: C408
cost_columns = [
col
for col in dv_data_i.columns
@@ -3124,10 +3125,10 @@ def aggregate_results(
cost_data = dv_data_i[cost_columns].mean()
cost_data_std = dv_data_i[cost_columns].std()
cost_key = cost_data.idxmax()
- meanKey = (
+ meanKey = ( # noqa: N806
f'R2Dres_mean_RepairCost_{dv_units[cost_key]}'
)
- stdKey = (
+ stdKey = ( # noqa: N806
f'R2Dres_std_RepairCost_{dv_units[cost_key]}'
)
r2d_res_dv.update(
@@ -3145,10 +3146,10 @@ def aggregate_results(
time_data = dv_data_i[time_columns].mean()
time_data_std = dv_data_i[time_columns].std()
time_key = time_data.idxmax()
- meanKey = (
+ meanKey = ( # noqa: N806
f'R2Dres_mean_RepairTime_{dv_units[time_key]}'
)
- stdKey = (
+ stdKey = ( # noqa: N806
f'R2Dres_std_RepairTime_{dv_units[time_key]}'
)
r2d_res_dv.update(
@@ -3185,12 +3186,12 @@ def aggregate_results(
# save outputs to JSON files
for rlz_i, rlz_data in realizations.items():
- with open(
+ with open( # noqa: PTH123
main_dir / f'{asset_type}_{rlz_i}.json', 'w', encoding='utf-8'
) as f:
json.dump(rlz_data, f, indent=2)
- with open(
+ with open( # noqa: PTH123
main_dir / f'{asset_type}_det.json', 'w', encoding='utf-8'
) as f:
json.dump(deterministic, f, indent=2)
@@ -3200,7 +3201,7 @@ def aggregate_results(
out_types = ['IM', 'BIM', 'EDP', 'DM', 'DV', 'every_realization']
if headers is None:
- headers = dict(
+ headers = dict( # noqa: C408
IM=[0, 1, 2, 3],
AIM=[
0,
@@ -3215,15 +3216,15 @@ def aggregate_results(
self.output_types.get(out_type, False)
):
if out_type == 'every_realization':
- realizations_EDP = None
- realizations_DL = None
+ realizations_EDP = None # noqa: N806
+ realizations_DL = None # noqa: N806
for asst in asst_data:
- print('ASSET', asst)
+ print('ASSET', asst) # noqa: T201
asst_file = asst['file']
# Get the folder containing the results
- aimDir = os.path.dirname(asst_file)
+ aimDir = os.path.dirname(asst_file) # noqa: PTH120, N806
asst_id = asst['id']
min_id = min(int(asst_id), min_id)
@@ -3237,8 +3238,8 @@ def aggregate_results(
index_col=0,
)
- if realizations_EDP == None:
- realizations_EDP = dict(
+ if realizations_EDP == None: # noqa: E711
+ realizations_EDP = dict( # noqa: C404, N806
[(col, []) for col in df_i.columns]
)
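C404 points out that a dict() call over a list of pairs can be a dict comprehension. A quick illustration with hypothetical column names:

    import pandas as pd

    df_i = pd.DataFrame(columns=['PFA-1-1', 'PID-1-1'])  # hypothetical
    realizations_edp = {col: [] for col in df_i.columns}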
@@ -3251,7 +3252,7 @@ def aggregate_results(
# If damage and loss assessment is part of the workflow
# then save the DL outputs too
- if 'DL' in self.workflow_apps.keys():
+ if 'DL' in self.workflow_apps.keys(): # noqa: SIM118
try:
# if True:
df_i = pd.read_csv(
@@ -3260,8 +3261,8 @@ def aggregate_results(
index_col=0,
)
- if realizations_DL == None:
- realizations_DL = dict(
+ if realizations_DL == None: # noqa: E711
+ realizations_DL = dict( # noqa: C404, N806
[(col, []) for col in df_i.columns]
)
@@ -3272,13 +3273,13 @@ def aggregate_results(
]
realizations_DL[col].append(vals)
- except:
+ except: # noqa: E722
log_msg(
f'Error reading DL realization data for asset {asset_type} {asst_id}',
prepend_timestamp=False,
)
- for d_type in realizations_EDP.keys():
+ for d_type in realizations_EDP.keys(): # noqa: SIM118
d_agg = pd.concat(
realizations_EDP[d_type], axis=0, sort=False
)
@@ -3293,8 +3294,8 @@ def aggregate_results(
format='fixed',
)
- if 'DL' in self.workflow_apps.keys():
- for d_type in realizations_DL.keys():
+ if 'DL' in self.workflow_apps.keys(): # noqa: SIM118
+ for d_type in realizations_DL.keys(): # noqa: SIM118
d_agg = pd.concat(
realizations_DL[d_type], axis=0, sort=False
)
@@ -3315,11 +3316,11 @@ def aggregate_results(
count = 0
for asst in asst_data:
if count % self.numP == self.procID:
- print('ASSET', self.procID, self.numP, asst['file'])
+ print('ASSET', self.procID, self.numP, asst['file']) # noqa: T201
asst_file = asst['file']
# Get the folder containing the results
- aimDir = os.path.dirname(asst_file)
+ aimDir = os.path.dirname(asst_file) # noqa: PTH120, N806
asst_id = asst['id']
min_id = min(int(asst_id), min_id)
@@ -3328,7 +3329,7 @@ def aggregate_results(
try:
# if True:
- csvPath = (
+ csvPath = ( # noqa: N806
aimDir + '/' + asst_id + f'/{out_type}.csv'
)
@@ -3345,7 +3346,7 @@ def aggregate_results(
out_list.append(df_i)
- except:
+ except: # noqa: E722
log_msg(
f'Error reading {out_type} data for asset {asset_type} {asst_id}',
prepend_timestamp=False,
@@ -3356,9 +3357,9 @@ def aggregate_results(
# save the collected DataFrames as csv files
if self.procID == 0:
- outPath = posixpath.join(run_path, f'{out_type}.csv')
+ outPath = posixpath.join(run_path, f'{out_type}.csv') # noqa: N806
else:
- outPath = posixpath.join(
+ outPath = posixpath.join( # noqa: N806
run_path, f'{out_type}_tmp_{self.procID}.csv'
)
@@ -3379,7 +3380,7 @@ def aggregate_results(
# fileList = []
for i in range(1, self.numP):
- fileToAppend = posixpath.join(
+ fileToAppend = posixpath.join( # noqa: N806
run_path, f'{out_type}_tmp_{i}.csv'
)
# fileList.append(fileToAppend)
@@ -3405,18 +3406,18 @@ def aggregate_results(
)
log_div()
- def compile_r2d_results_geojson(self, asset_files):
+ def compile_r2d_results_geojson(self, asset_files): # noqa: D102
run_path = self.run_dir
- with open(self.input_file, encoding='utf-8') as f:
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
- with open(run_path / 'Results_det.json', encoding='utf-8') as f:
+ with open(run_path / 'Results_det.json', encoding='utf-8') as f: # noqa: PTH123
res_det = json.load(f)
metadata = {
'Name': input_data['Name'],
'Units': input_data['units'],
'Author': input_data['Author'],
'WorkflowType': input_data['WorkflowType'],
- 'Time': datetime.now().strftime('%m-%d-%Y %H:%M:%S'),
+ 'Time': datetime.now().strftime('%m-%d-%Y %H:%M:%S'), # noqa: DTZ005
}
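DTZ005 flags naive timestamps. A timezone-aware equivalent for the metadata field would look like:

    from datetime import datetime, timezone

    time_stamp = datetime.now(timezone.utc).strftime('%m-%d-%Y %H:%M:%S')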
# create the geojson for R2D visualization
geojson_result = {
@@ -3428,12 +3429,12 @@ def compile_r2d_results_geojson(self, asset_files):
'metadata': metadata,
'features': [],
}
- for asset_type in asset_files.keys():
- for assetSubtype, subtypeResult in res_det[asset_type].items():
- allAssetIds = sorted([int(x) for x in subtypeResult.keys()])
+ for asset_type in asset_files.keys(): # noqa: SIM118
+ for assetSubtype, subtypeResult in res_det[asset_type].items(): # noqa: N806
+ allAssetIds = sorted([int(x) for x in subtypeResult.keys()]) # noqa: SIM118, N806
for asset_id in allAssetIds:
ft = {'type': 'Feature'}
- asst_GI = subtypeResult[str(asset_id)][
+ asst_GI = subtypeResult[str(asset_id)][ # noqa: N806
'GeneralInformation'
].copy()
asst_GI.update({'assetType': asset_type})
@@ -3454,8 +3455,8 @@ def compile_r2d_results_geojson(self, asset_files):
'coordinates': [asst_lon, asst_lat],
}
asst_GI.pop('location')
- except:
- warnings.warn(
+ except: # noqa: E722
+ warnings.warn( # noqa: B028
UserWarning(
f'Geospatial info is missing in {assetSubtype} {asset_id}'
)
@@ -3467,20 +3468,20 @@ def compile_r2d_results_geojson(self, asset_files):
ft.update({'properties': asst_GI})
ft['properties'].update(subtypeResult[str(asset_id)]['R2Dres'])
geojson_result['features'].append(ft)
- with open(run_path / 'R2D_results.geojson', 'w', encoding='utf-8') as f:
+ with open(run_path / 'R2D_results.geojson', 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(geojson_result, f, indent=2)
- def combine_assets_results(self, asset_files):
+ def combine_assets_results(self, asset_files): # noqa: D102
asset_types = list(asset_files.keys())
for asset_type in asset_types:
if self.workflow_apps['DL'][asset_type].name != 'Pelicun3':
# isPelicun3 = False
asset_files.pop(asset_type)
if asset_files: # If any asset_type uses Pelicun3 as DL app
- with open(self.input_file, encoding='utf-8') as f:
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
sample_size = []
- for asset_type, assetIt in asset_files.items():
+ for asset_type, assetIt in asset_files.items(): # noqa: B007, N806, PERF102
sample_size.append(
input_data['Applications']['DL'][asset_type]['ApplicationData'][
'Realizations'
@@ -3490,23 +3491,23 @@ def combine_assets_results(self, asset_files):
# Create the Results_det.json and Results_rlz_i.json for recovery
deterministic = {}
realizations = {rlz_i: {} for rlz_i in range(sample_size)}
- for asset_type in asset_files.keys():
+ for asset_type in asset_files.keys(): # noqa: SIM118
asset_dir = self.run_dir / asset_type
determine_file = asset_dir / f'{asset_type}_det.json'
- with open(determine_file, encoding='utf-8') as f:
+ with open(determine_file, encoding='utf-8') as f: # noqa: PTH123
determ_i = json.load(f)
deterministic.update(determ_i)
for rlz_i in range(sample_size):
rlz_i_file = asset_dir / f'{asset_type}_{rlz_i}.json'
- with open(rlz_i_file, encoding='utf-8') as f:
+ with open(rlz_i_file, encoding='utf-8') as f: # noqa: PTH123
rlz_i_i = json.load(f)
realizations[rlz_i].update(rlz_i_i)
determine_file = self.run_dir / 'Results_det.json'
- with open(determine_file, 'w', encoding='utf-8') as f:
+ with open(determine_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(deterministic, f, indent=2)
for rlz_i, rlz_data in realizations.items():
- with open(
+ with open( # noqa: PTH123
self.run_dir / f'Results_{rlz_i}.json', 'w', encoding='utf-8'
) as f:
json.dump(rlz_data, f, indent=2)
diff --git a/modules/common/simcenter_common.py b/modules/common/simcenter_common.py
index f622f6e03..d1aad42e4 100644
--- a/modules/common/simcenter_common.py
+++ b/modules/common/simcenter_common.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -42,23 +42,23 @@
# Monkeypatch warnings to get prettier messages
-def _warning(message, category, filename, lineno, file=None, line=None):
+def _warning(message, category, filename, lineno, file=None, line=None): # noqa: ARG001
if '\\' in filename:
file_path = filename.split('\\')
elif '/' in filename:
file_path = filename.split('/')
python_file = '/'.join(file_path[-3:])
- print(f'WARNING in {python_file} at line {lineno}\n{message}\n')
+ print(f'WARNING in {python_file} at line {lineno}\n{message}\n') # noqa: T201
warnings.showwarning = _warning
-def show_warning(warning_msg):
- warnings.warn(UserWarning(warning_msg))
+def show_warning(warning_msg): # noqa: D103
+ warnings.warn(UserWarning(warning_msg)) # noqa: B028
-def log_msg(msg='', prepend_timestamp=True):
+def log_msg(msg='', prepend_timestamp=True): # noqa: FBT002
"""Print a message to the screen with the current time as prefix
The time is in ISO-8601 format, e.g. 2018-06-16T20:24:04Z
@@ -68,19 +68,19 @@ def log_msg(msg='', prepend_timestamp=True):
msg: string
Message to print.
- """
+ """ # noqa: D400
if prepend_timestamp:
formatted_msg = '{} {}'.format(
- datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S:%fZ')[:-4],
+ datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S:%fZ')[:-4], # noqa: DTZ003
msg,
)
else:
formatted_msg = msg
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
if globals().get('log_file', None) is not None:
- with open(globals()['log_file'], 'a') as f:
+ with open(globals()['log_file'], 'a') as f: # noqa: PTH123
f.write('\n' + formatted_msg)
@@ -169,7 +169,7 @@ def log_msg(msg='', prepend_timestamp=True):
# force
N = kg * m / sec2
-kN = 1e3 * N
+kN = 1e3 * N # noqa: N816
lbf = lb * g
kip = 1000.0 * lbf
@@ -178,7 +178,7 @@ def log_msg(msg='', prepend_timestamp=True):
# pressure / stress
Pa = N / m2
-kPa = 1e3 * Pa
+kPa = 1e3 * Pa # noqa: N816
MPa = 1e6 * Pa
GPa = 1e9 * Pa
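As a quick sanity check of these derived constants, assuming SI base values (m = kg = sec = 1.0):

    m, kg, sec = 1.0, 1.0, 1.0  # hypothetical SI base values
    sec2 = sec * sec
    N = kg * m / sec2
    kN = 1e3 * N
    Pa = N / (m * m)
    kPa = 1e3 * Pa
    print(kN, kPa)  # 1000.0 1000.0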
@@ -216,8 +216,8 @@ def log_msg(msg='', prepend_timestamp=True):
unit_decoupling_type_list = ['TH_file']
-def get_scale_factors(input_units, output_units):
- """Determine the scale factor to convert input event to internal event data"""
+def get_scale_factors(input_units, output_units): # noqa: C901
+ """Determine the scale factor to convert input event to internal event data""" # noqa: D400
# special case: if the input unit is not specified then do not do any scaling
if input_units is None:
scale_factors = {'ALL': 1.0}
@@ -231,13 +231,13 @@ def get_scale_factors(input_units, output_units):
unit_length = 'inch'
f_length = globals().get(unit_length, None)
if f_length is None:
- raise ValueError(f'Specified length unit not recognized: {unit_length}')
+ raise ValueError(f'Specified length unit not recognized: {unit_length}') # noqa: EM102, TRY003
# if no time unit is specified, 'sec' is assumed
unit_time = output_units.get('time', 'sec')
f_time = globals().get(unit_time, None)
if f_time is None:
- raise ValueError(f'Specified time unit not recognized: {unit_time}')
+ raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: EM102, TRY003
scale_factors = {}
@@ -249,11 +249,11 @@ def get_scale_factors(input_units, output_units):
else:
# get the scale factor to standard units
if input_unit == 'in':
- input_unit = 'inch'
+ input_unit = 'inch' # noqa: PLW2901
f_in = globals().get(input_unit, None)
if f_in is None:
- raise ValueError(f'Input unit not recognized: {input_unit}')
+ raise ValueError(f'Input unit not recognized: {input_unit}') # noqa: EM102, TRY003
unit_type = None
for base_unit_type, unit_set in globals()['unit_types'].items():
@@ -261,7 +261,7 @@ def get_scale_factors(input_units, output_units):
unit_type = base_unit_type
if unit_type is None:
- raise ValueError(f'Failed to identify unit type: {input_unit}')
+ raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: EM102, TRY003
# the output unit depends on the unit type
if unit_type == 'acceleration':
@@ -274,8 +274,8 @@ def get_scale_factors(input_units, output_units):
f_out = 1.0 / f_length
else:
- raise ValueError(
- f'Unexpected unit type in workflow: {unit_type}'
+ raise ValueError( # noqa: TRY003
+ f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102
)
# the scale factor is the product of input and output scaling
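A minimal, self-contained sketch of the lookup pattern used by get_scale_factors: unit names are resolved against module-level constants via globals(), with the EM102-style message assigned before the raise (the inch value here is hypothetical):

    inch = 0.0254  # hypothetical: metres per inch
    unit_length = 'inch'
    f_length = globals().get(unit_length, None)
    if f_length is None:
        msg = f'Specified length unit not recognized: {unit_length}'
        raise ValueError(msg)
    f_out = 1.0 / f_length  # output scaling for a length-type unit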
@@ -287,7 +287,7 @@ def get_scale_factors(input_units, output_units):
def get_unit_bases(input_units):
- """Decouple input units"""
+ """Decouple input units""" # noqa: D400
# special case: if the input unit is not specified then do nothing
if input_units is None:
input_unit_bases = {}
diff --git a/modules/createAIM/CSV_to_AIM/CSV_to_AIM.py b/modules/createAIM/CSV_to_AIM/CSV_to_AIM.py
index b58068b35..b3a6c8321 100644
--- a/modules/createAIM/CSV_to_AIM/CSV_to_AIM.py
+++ b/modules/createAIM/CSV_to_AIM/CSV_to_AIM.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -44,7 +44,7 @@
import sys
-def create_asset_files(output_file, asset_source_file, asset_filter, doParallel):
+def create_asset_files(output_file, asset_source_file, asset_filter, doParallel): # noqa: C901, N803, D103
# these imports are here to save time when the app is called without
# the -getRV flag
import importlib
@@ -54,9 +54,9 @@ def create_asset_files(output_file, asset_source_file, asset_filter, doParallel)
import pandas as pd
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -64,18 +64,18 @@ def create_asset_files(output_file, asset_source_file, asset_filter, doParallel)
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
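The block above implements an optional-MPI pattern. A condensed, runnable sketch, assuming only that mpi4py may or may not be installed:

    import importlib.util

    num_p, proc_id, run_parallel = 1, 0, False
    if importlib.util.find_spec('mpi4py') is not None:
        from mpi4py import MPI

        comm = MPI.COMM_WORLD
        num_p, proc_id, run_parallel = comm.Get_size(), comm.Get_rank(), True
        if num_p < 2:  # fall back to serial on a single rank
            num_p, proc_id, run_parallel = 1, 0, False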
# Get the out dir, may not always be in the results folder if multiple assets are used
- outDir = os.path.dirname(output_file)
+ outDir = os.path.dirname(output_file) # noqa: PTH120, N806
# check if a filter is provided
if asset_filter is not None:
@@ -93,7 +93,7 @@ def create_asset_files(output_file, asset_source_file, asset_filter, doParallel)
# if there is a filter, then pull out only the required assets
if asset_filter is not None:
- assets_available = assets_df.index.values
+ assets_available = assets_df.index.values # noqa: PD011
assets_to_run = assets_requested[
np.where(np.isin(assets_requested, assets_available))[0]
]
@@ -102,18 +102,18 @@ def create_asset_files(output_file, asset_source_file, asset_filter, doParallel)
selected_assets = assets_df
# identify the labels
- labels = selected_assets.columns.values
+ labels = selected_assets.columns.values # noqa: PD011
assets_array = []
# for each asset...
count = 0
for asset_id, asset in selected_assets.iterrows():
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=str(int(asset_id)),
location={
'latitude': asset['Latitude'],
@@ -126,40 +126,40 @@ def create_asset_files(output_file, asset_source_file, asset_filter, doParallel)
for label in labels:
AIM_i['GeneralInformation'].update({label: asset[label]})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
if procID != 0:
# if not P0, write data to output file with procID in name and barrier
- output_file = os.path.join(outDir, f'tmp_{procID}.json')
+ output_file = os.path.join(outDir, f'tmp_{procID}.json') # noqa: PTH118
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=0)
comm.Barrier()
else:
- if runParallel == True:
+ if runParallel == True: # noqa: E712
# if parallel & P0, barrier so that all files written above, then loop over other processor files: open, load data and append
comm.Barrier()
for i in range(1, numP):
- fileToAppend = os.path.join(outDir, f'tmp_{i}.json')
- with open(fileToAppend, encoding='utf-8') as data_file:
+ fileToAppend = os.path.join(outDir, f'tmp_{i}.json') # noqa: PTH118, N806
+ with open(fileToAppend, encoding='utf-8') as data_file: # noqa: PTH123
json_data = data_file.read()
- assetsToAppend = json.loads(json_data)
+ assetsToAppend = json.loads(json_data) # noqa: N806
assets_array += assetsToAppend
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=2)
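Before the next file, it is worth pinning down the MPI pattern this diff annotates again and again: each rank takes every numP-th asset (`count % numP == procID`), non-root ranks park their share in `tmp_<rank>.json`, and rank 0 barriers, reads the temp files, and writes the merged array. A minimal, self-contained sketch of that pattern, with a hypothetical work function, falling back to serial when mpi4py or extra ranks are unavailable:

```python
# Minimal sketch of the scatter/gather-by-file pattern above.
# Hypothetical helper; items, work_fn, and file names are illustrative.
import importlib.util
import json
import os

def process_items(items, out_dir, work_fn):
    num_p, proc_id, comm = 1, 0, None
    if importlib.util.find_spec('mpi4py') is not None:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        num_p, proc_id = comm.Get_size(), comm.Get_rank()
        if num_p < 2:  # single rank: fall back to serial
            num_p, proc_id, comm = 1, 0, None

    # round-robin partition: rank r handles items r, r + num_p, r + 2 * num_p, ...
    results = [work_fn(item) for i, item in enumerate(items) if i % num_p == proc_id]

    if comm is not None and proc_id != 0:
        # non-root ranks park their share on disk, then wait at the barrier
        with open(os.path.join(out_dir, f'tmp_{proc_id}.json'), 'w', encoding='utf-8') as f:
            json.dump(results, f)
        comm.Barrier()
        return None
    if comm is not None:
        comm.Barrier()  # all tmp files exist beyond this point
        for i in range(1, num_p):
            with open(os.path.join(out_dir, f'tmp_{i}.json'), encoding='utf-8') as f:
                results += json.load(f)
    return results
```

Gathering through per-rank files instead of `comm.gather` keeps each rank's memory bounded at the cost of extra I/O, which is the trade-off the functions in this diff make.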
diff --git a/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py b/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py
index 1213ce62c..350ead0da 100644
--- a/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py
+++ b/modules/createAIM/GeoJSON_to_ASSET/GeoJSON_to_ASSET.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import importlib
import json
import os
@@ -15,15 +15,15 @@
# https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable
-class NpEncoder(json.JSONEncoder):
- def default(self, obj):
+class NpEncoder(json.JSONEncoder): # noqa: D101
+ def default(self, obj): # noqa: D102
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
- return super(NpEncoder, self).default(obj)
+ return super(NpEncoder, self).default(obj) # noqa: UP008
class generalAIMGenerator:
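The NpEncoder defined above exists because the standard json module refuses NumPy scalars and arrays; a small illustration of the failure and the fix (values are made up), including the bare `super()` form the UP008 suppression points at:

```python
import json
import numpy as np

payload = {'count': np.int64(3), 'ratio': np.float32(0.5), 'ids': np.arange(3)}
# json.dumps(payload) would raise:
# TypeError: Object of type int64 is not JSON serializable

class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)  # bare super(): the form UP008 asks for

print(json.dumps(payload, cls=NpEncoder))  # {"count": 3, "ratio": 0.5, "ids": [0, 1, 2]}
```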
@@ -34,44 +34,44 @@ class generalAIMGenerator:
:param `**kwargs`: The keyword arguments are used for ...
:ivar arg: This is where we store arg
:vartype arg: str
- """
+ """ # noqa: D205, D400
def __init__(self, output_file):
self.output_file = output_file
self.gdf = None
self.filter = None
- def load_asset_gdf(self, source_file):
+ def load_asset_gdf(self, source_file): # noqa: D102
asset_gdf = gpd.read_file(source_file)
self.gdf = asset_gdf
- def set_asset_gdf(self, asset_gdf):
+ def set_asset_gdf(self, asset_gdf): # noqa: D102
self.gdf = asset_gdf
- def selectAssets(self, filter):
+ def selectAssets(self, filter): # noqa: A002, N802, D102
self.filter = filter
# check if a filter is provided for the assets
if self.filter is not None:
- asset_requested = []
+ asset_requested = [] # noqa: F841
for assets in self.filter.split(','):
if '-' in assets:
asset_low, asset_high = assets.split('-')
- assets_requested += list(
+ assets_requested += list( # noqa: F821
range(int(asset_low), int(asset_high) + 1)
)
else:
assets_requested.append(int(assets))
assets_requested = np.array(assets_requested)
- assets_available = self.gdf.index.values
+ assets_available = self.gdf.index.values # noqa: PD011
assets_to_run = assets_requested[
np.where(np.isin(assets_requested, assets_available))[0]
]
else:
- assets_to_run = self.gdf.index.values
+ assets_to_run = self.gdf.index.values # noqa: PD011
self.gdf = self.gdf.loc[assets_to_run, :]
return assets_to_run
- def createAIM(self, asset_idx, component_type=None):
+ def createAIM(self, asset_idx, component_type=None): # noqa: ARG002, N802, D102
# initialize the AIM file
# if component_type is not None:
# asset_id = component_type+"_"+str(asset_idx)
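The F841/F821 pair in the selectAssets hunk above marks a live bug rather than style noise: the list is created as `asset_requested`, but every later reference spells it `assets_requested`, so the first non-range filter entry raises NameError. A corrected sketch of the range-list parser, assuming the same '1-5,8,10-12' filter syntax (np.unique adds sorting and de-duplication beyond the original):

```python
import numpy as np

def parse_filter(filter_str):
    """Expand a '1-5,8,10-12' style filter into a sorted array of asset ids."""
    assets_requested = []  # one consistent name is the whole F841/F821 fix
    for chunk in filter_str.split(','):
        if '-' in chunk:
            low, high = chunk.split('-')
            assets_requested += list(range(int(low), int(high) + 1))
        else:
            assets_requested.append(int(chunk))
    return np.unique(np.array(assets_requested))

print(parse_filter('1-3,7,9-10'))  # [ 1  2  3  7  9 10]
```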
@@ -79,9 +79,9 @@ def createAIM(self, asset_idx, component_type=None):
# asset_id = str(asset_idx)
asset_id = asset_idx
asset = self.gdf.loc[asset_idx, :]
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=str(asset_id),
location={
'latitude': asset['geometry'].centroid.coords[0][1],
@@ -98,34 +98,34 @@ def createAIM(self, asset_idx, component_type=None):
# AIM_i["GeneralInformation"].update({"assetSubtype":component_type})
return AIM_i
- def dumpAIM(self, AIM_i):
+ def dumpAIM(self, AIM_i): # noqa: N802, N803, D102
# assetSubtype = AIM_i['GeneralInformation'].get("assetSubtype", None)
- componentType = AIM_i['GeneralInformation'].get('type', None)
- outDir = os.path.dirname(self.output_file)
+ componentType = AIM_i['GeneralInformation'].get('type', None) # noqa: N806
+ outDir = os.path.dirname(self.output_file) # noqa: PTH120, N806
if componentType:
- outDir = os.path.join(outDir, componentType)
+ outDir = os.path.join(outDir, componentType) # noqa: PTH118, N806
asset_id = AIM_i['GeneralInformation']['AIM_id']
- AIM_file_name = f'{asset_id}-AIM.json'
- AIM_file_name = os.path.join(outDir, AIM_file_name)
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2, cls=NpEncoder)
return AIM_file_name
-class lineAIMGenerator(generalAIMGenerator):
- def breakDownLongLines(self, delta, tolerance=10e-3):
+class lineAIMGenerator(generalAIMGenerator): # noqa: D101
+ def breakDownLongLines(self, delta, tolerance=10e-3): # noqa: N802, D102
edges = self.gdf
- dropedEdges = []
- newEdges = []
+ dropedEdges = [] # noqa: N806
+ newEdges = [] # noqa: N806
crs = edges.crs
- edgesOrig = edges.copy()
+ edgesOrig = edges.copy() # noqa: N806
# edgesOrig["IDbase"] = edgesOrig["OID"].apply(lambda x: x.split('_')[0])
edgesOrig['IDbase'] = edgesOrig.index
- num_segExistingMap = edgesOrig.groupby('IDbase').count().iloc[:, 0].to_dict()
+ num_segExistingMap = edgesOrig.groupby('IDbase').count().iloc[:, 0].to_dict() # noqa: N806
edges_dict = edges.reset_index().to_crs('epsg:6500')
edges_dict = edges_dict.to_dict(orient='records')
for row_ind in range(len(edges_dict)):
- LS = edges_dict[row_ind]['geometry']
+ LS = edges_dict[row_ind]['geometry'] # noqa: N806
num_seg = int(np.ceil(LS.length / delta))
if num_seg == 1:
continue
@@ -134,14 +134,14 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
[LS.interpolate(distance) for distance in distances[:-1]]
+ [LS.coords[-1]]
)
- LS = shapely.ops.snap(LS, points, tolerance)
+ LS = shapely.ops.snap(LS, points, tolerance) # noqa: N806
with warnings.catch_warnings(): # Suppress the warning of points not on
# LS. Shapely will first project the point onto the line and then split
warnings.simplefilter('ignore')
- splittedLS = shapely.ops.split(LS, points).geoms
- currentEdge = edges_dict[row_ind].copy()
- num_segExisting = num_segExistingMap[currentEdge['id']]
- for sLS_ind, sLS in enumerate(splittedLS):
+ splittedLS = shapely.ops.split(LS, points).geoms # noqa: N806
+ currentEdge = edges_dict[row_ind].copy() # noqa: N806
+ num_segExisting = num_segExistingMap[currentEdge['id']] # noqa: N806, F841
+ for sLS_ind, sLS in enumerate(splittedLS): # noqa: N806
# create new edge
# if sLS_ind ==0:
# newID = currentEdge["id"]
@@ -149,9 +149,9 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
# newID = currentEdge["id"]+"_"+str(num_segExisting)
# num_segExisting +=1
# num_segExistingMap[currentEdge["id"]] += 1
- newID = currentEdge['id']
- newGeom = sLS
- newEdge = currentEdge.copy()
+ newID = currentEdge['id'] # noqa: N806
+ newGeom = sLS # noqa: N806
+ newEdge = currentEdge.copy() # noqa: N806
newEdge.update({'id': newID, 'geometry': newGeom, 'segID': sLS_ind})
newEdges.append(newEdge)
dropedEdges.append(edges_dict[row_ind]['id'])
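breakDownLongLines leans on a three-step Shapely idiom: interpolate evenly spaced points along a line, snap the line to those points to absorb floating-point drift, then split at them. A stripped-down sketch in a projected CRS, with illustrative coordinates:

```python
import numpy as np
import shapely
import shapely.ops

line = shapely.LineString([(0, 0), (100, 0)])  # metres, projected CRS
delta = 30  # target segment length
num_seg = int(np.ceil(line.length / delta))  # -> 4 segments

distances = np.linspace(0, line.length, num_seg + 1)
points = shapely.MultiPoint([line.interpolate(d) for d in distances[1:-1]])
# snap is a no-op here, but on real data it absorbs floating-point drift
# so that split recognises the points as lying on the line
snapped = shapely.ops.snap(line, points, tolerance=1e-2)
segments = shapely.ops.split(snapped, points).geoms
print([s.length for s in segments])  # [25.0, 25.0, 25.0, 25.0]
```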
@@ -159,7 +159,7 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
edges = edges.reset_index() # Convert "id" from index into a column
if len(newEdges) > 0:
edges['segID'] = 0
- newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs)
+ newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs) # noqa: N806
edges = pd.concat([edges, newEdges], ignore_index=True)
edges = edges.sort_values(['id', 'segID'])
edges = (
@@ -170,9 +170,9 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
# self.gdf = edges.reset_index().rename(columns={"index":"AIM_id"})
self.gdf = edges
- def defineConnectivities(
+ def defineConnectivities( # noqa: N802, D102
self,
- AIM_id_prefix=None,
+ AIM_id_prefix=None, # noqa: N803
edges_file_name=None,
nodes_file_name=None,
):
@@ -195,13 +195,13 @@ def defineConnectivities(
# check if first and last are the same
if start == first and end == last:
continue
- elif start == last and end == first:
- newStartID = edges.loc[ind, 'node_end']
- newEndID = edges.loc[ind, 'node_start']
+ elif start == last and end == first: # noqa: RET507
+ newStartID = edges.loc[ind, 'node_end'] # noqa: N806
+ newEndID = edges.loc[ind, 'node_start'] # noqa: N806
edges.loc[ind, 'node_start'] = newStartID
edges.loc[ind, 'node_end'] = newEndID
else:
- print(
+ print( # noqa: T201
ind,
'th row of edges has mismatched start/first, end/last pairs; likely a bug in the momepy.gdf_to_nx function',
)
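The RET507 suppressions above flag an `elif` that follows a branch ending in `continue`: the first branch never falls through, so the `elif` can be a plain `if`. A self-contained sketch of the same start/end reorientation check, using a hypothetical Edge record in place of a DataFrame row:

```python
from dataclasses import dataclass

@dataclass
class Edge:  # hypothetical stand-in for one row of the edges frame
    start: int
    end: int
    first: int
    last: int

def reorient(edges):
    for e in edges:
        if e.start == e.first and e.end == e.last:
            continue
        # was `elif` in the original; after `continue`, a plain `if` is
        # equivalent and drops one indentation level (the RET507 fix)
        if e.start == e.last and e.end == e.first:
            e.start, e.end = e.end, e.start
        else:
            print(e, 'has mismatched start/first, end/last pairs')
    return edges

print(reorient([Edge(1, 2, 1, 2), Edge(2, 1, 1, 2)]))
```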
@@ -218,38 +218,38 @@ def defineConnectivities(
edges['AIM_id'] = edges['AIM_id'].apply(
lambda x: AIM_id_prefix + '_' + str(x)
)
- outDir = os.path.dirname(self.output_file)
+ outDir = os.path.dirname(self.output_file) # noqa: PTH120, N806
if edges_file_name is not None:
edges.to_file(
- os.path.join(outDir, f'{edges_file_name}.geojson'),
+ os.path.join(outDir, f'{edges_file_name}.geojson'), # noqa: PTH118
driver='GeoJSON',
)
if nodes_file_name is not None:
- nodesNeeded = list(
+ nodesNeeded = list( # noqa: N806
set(
- edges['StartNode'].values.tolist()
- + edges['EndNode'].values.tolist()
+ edges['StartNode'].values.tolist() # noqa: PD011
+ + edges['EndNode'].values.tolist() # noqa: PD011
)
)
nodes = nodes.loc[nodesNeeded, :]
nodes = nodes.to_crs(datacrs)[['nodeID', 'geometry']]
nodes.to_file(
- os.path.join(outDir, f'{nodes_file_name}.geojson'),
+ os.path.join(outDir, f'{nodes_file_name}.geojson'), # noqa: PTH118
driver='GeoJSON',
)
self.gdf = edges
-def split_and_select_components(input_config, asset_source_file):
- component_dict = dict()
- with open(asset_source_file, encoding='utf-8') as f:
+def split_and_select_components(input_config, asset_source_file): # noqa: C901, D103
+ component_dict = dict() # noqa: C408
+ with open(asset_source_file, encoding='utf-8') as f: # noqa: PTH123
source_data = json.load(f)
crs = source_data['crs']
- featureList = source_data['features']
- requested_dict = dict()
+ featureList = source_data['features'] # noqa: N806
+ requested_dict = dict() # noqa: C408
for key, value in input_config.items():
if isinstance(value, dict):
- filterString = value.get('filter', None)
+ filterString = value.get('filter', None) # noqa: N806
if filterString is None:
continue
assets_requested = []
@@ -286,33 +286,33 @@ def split_and_select_components(input_config, asset_source_file):
return component_dict
-def init_workdir(component_dict, outDir):
+def init_workdir(component_dict, outDir): # noqa: N803, D103
os.chdir(outDir)
for dir_or_file in os.listdir(outDir):
if dir_or_file != 'log.txt':
- if os.path.isdir(dir_or_file):
+ if os.path.isdir(dir_or_file): # noqa: PTH112
shutil.rmtree(dir_or_file)
else:
- os.remove(dir_or_file)
- component_dir = dict()
- for comp in component_dict.keys():
- compDir = posixpath.join(outDir, comp)
- os.mkdir(compDir)
+ os.remove(dir_or_file) # noqa: PTH107
+ component_dir = dict() # noqa: C408
+ for comp in component_dict.keys(): # noqa: SIM118
+ compDir = posixpath.join(outDir, comp) # noqa: N806
+ os.mkdir(compDir) # noqa: PTH102
component_dir.update({comp: compDir})
return component_dir
-def create_asset_files(
+def create_asset_files( # noqa: C901, D103
output_file,
asset_source_file,
asset_type,
input_file,
- doParallel,
+ doParallel, # noqa: N803
):
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -320,18 +320,18 @@ def create_asset_files(
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
- outDir = os.path.dirname(output_file)
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ outDir = os.path.dirname(output_file) # noqa: PTH120, N806
- with open(input_file, encoding='utf-8') as f:
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
input_config = input_data['Applications']['Assets'][asset_type][
'ApplicationData'
@@ -341,20 +341,20 @@ def create_asset_files(
# the assetSourceFile passed on the command line may differ from the one in input_config when run on DesignSafe
component_dict = split_and_select_components(input_config, asset_source_file)
- component_dir = init_workdir(component_dict, outDir)
+ component_dir = init_workdir(component_dict, outDir) # noqa: F841
assets_array = []
for component_type, component_data in component_dict.items():
- geom_type = type(component_data['geometry'].values[0])
+ geom_type = type(component_data['geometry'].values[0]) # noqa: PD011
if geom_type in [shapely.Point, shapely.Polygon]:
# if component_type in ["HwyBridge", "HwyTunnel"]:
- AIMgenerator = generalAIMGenerator(output_file)
+ AIMgenerator = generalAIMGenerator(output_file) # noqa: N806
AIMgenerator.set_asset_gdf(component_data)
- selected_Asset_idxs = AIMgenerator.selectAssets(None)
+ selected_Asset_idxs = AIMgenerator.selectAssets(None) # noqa: N806
# elif component_type in ["Roadway"]:
elif geom_type == shapely.LineString:
- AIMgenerator = lineAIMGenerator(output_file)
+ AIMgenerator = lineAIMGenerator(output_file) # noqa: N806
AIMgenerator.set_asset_gdf(component_data)
- selected_Asset_idxs = AIMgenerator.selectAssets(None)
+ selected_Asset_idxs = AIMgenerator.selectAssets(None) # noqa: N806
# AIMgenerator.breakDownLongLines(roadSegLength)
# # AIMgenerator.defineConnectivities(None, "hwy_edges",\
# # "hwy_nodes")
@@ -363,40 +363,40 @@ def create_asset_files(
# selected_Asset_idxs = AIMgenerator.selectAssets(None)
else:
sys.exit(
- (f'The geometry type {geom_type} defined for the')
+ (f'The geometry type {geom_type} defined for the') # noqa: ISC003
+ (f'components {component_type} is not supported in ')
+ (f'the assets {asset_type}')
)
# for each asset...
count = 0
for asset_idx in selected_Asset_idxs:
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- AIM_i = AIMgenerator.createAIM(asset_idx, component_type)
- AIM_file_name = AIMgenerator.dumpAIM(AIM_i)
+ AIM_i = AIMgenerator.createAIM(asset_idx, component_type) # noqa: N806
+ AIM_file_name = AIMgenerator.dumpAIM(AIM_i) # noqa: N806
assets_array.append(
- dict(
+ dict( # noqa: C408
id=AIM_i['GeneralInformation']['AIM_id'], file=AIM_file_name
)
)
count = count + 1
if procID != 0:
# if not P0, write data to an output file with procID in the name, then barrier
- output_file_p = os.path.join(outDir, f'tmp_{procID}.json')
- with open(output_file_p, 'w', encoding='utf-8') as f:
+ output_file_p = os.path.join(outDir, f'tmp_{procID}.json') # noqa: PTH118
+ with open(output_file_p, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=0)
comm.Barrier()
else:
- if runParallel == True:
+ if runParallel == True: # noqa: E712
# if parallel & on P0: barrier so all ranks have written their files above, then loop over the other processors' files: open, load the data, and append
comm.Barrier()
for i in range(1, numP):
- fileToAppend = os.path.join(outDir, f'tmp_{i}.json')
- with open(fileToAppend, encoding='utf-8') as data_file:
+ fileToAppend = os.path.join(outDir, f'tmp_{i}.json') # noqa: PTH118, N806
+ with open(fileToAppend, encoding='utf-8') as data_file: # noqa: PTH123
json_data = data_file.read()
- assetsToAppend = json.loads(json_data)
+ assetsToAppend = json.loads(json_data) # noqa: N806
assets_array += assetsToAppend
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=2, cls=NpEncoder)
# else:
# print(f"The asset_type {asset_type} is not one of Buildings, TransportationNetwork or WaterNetwork, and is currently not supported")
diff --git a/modules/createAIM/GeoJSON_to_BIM/GeoJSON_to_BIM.py b/modules/createAIM/GeoJSON_to_BIM/GeoJSON_to_BIM.py
index 4a612d63f..a531ea2e7 100644
--- a/modules/createAIM/GeoJSON_to_BIM/GeoJSON_to_BIM.py
+++ b/modules/createAIM/GeoJSON_to_BIM/GeoJSON_to_BIM.py
@@ -1,9 +1,9 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import sys
-def create_building_files(output_file, building_source_file, min_id, max_id):
+def create_building_files(output_file, building_source_file, min_id, max_id): # noqa: D103
# check if the min and max values are provided in the right order
if (min_id is not None) and (max_id is not None):
if min_id > max_id:
@@ -11,7 +11,7 @@ def create_building_files(output_file, building_source_file, min_id, max_id):
min_id = max_id
max_id = tmp
- with open(building_source_file, encoding='utf-8') as f:
+ with open(building_source_file, encoding='utf-8') as f: # noqa: PTH123
building_source_list = json.load(f)['features']
buildings_array = []
@@ -26,7 +26,7 @@ def create_building_files(output_file, building_source_file, min_id, max_id):
bldg_loc = bldg_src['geometry']['coordinates']
- BIM_i = {
+ BIM_i = { # noqa: N806
'RandomVariables': [],
'GI': dict(
BIM_id=str(bldg_id),
@@ -35,14 +35,14 @@ def create_building_files(output_file, building_source_file, min_id, max_id):
),
}
- BIM_file_name = f'{bldg_id}-BIM.json'
+ BIM_file_name = f'{bldg_id}-BIM.json' # noqa: N806
- with open(BIM_file_name, 'w', encoding='utf-8') as f:
+ with open(BIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(BIM_i, f, indent=2)
- buildings_array.append(dict(id=str(bldg_id), file=BIM_file_name))
+ buildings_array.append(dict(id=str(bldg_id), file=BIM_file_name)) # noqa: C408
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(buildings_array, f, indent=2)
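PTH118, PTH120, and PTH123 are the most frequent suppressions in these files, and they all point at one migration: os.path.dirname/os.path.join/open to pathlib. A sketch of the equivalent pathlib forms for the calls used above (paths are illustrative):

```python
from pathlib import Path

output_file = 'results/out.json'  # illustrative

out_dir = Path(output_file).parent            # PTH120: os.path.dirname(output_file)
out_dir.mkdir(parents=True, exist_ok=True)    # PTH102/PTH103: os.mkdir / os.makedirs
aim_path = out_dir / '12-AIM.json'            # PTH118: os.path.join(out_dir, name)

with aim_path.open('w', encoding='utf-8') as f:  # PTH123: open(...)
    f.write('{}')

print(aim_path)  # results/12-AIM.json
```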
diff --git a/modules/createAIM/INP_FILE/INP_FILE.py b/modules/createAIM/INP_FILE/INP_FILE.py
index 738e4c504..c2a36f946 100644
--- a/modules/createAIM/INP_FILE/INP_FILE.py
+++ b/modules/createAIM/INP_FILE/INP_FILE.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import importlib
import json
import os
@@ -15,15 +15,15 @@
# https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable
-class NpEncoder(json.JSONEncoder):
- def default(self, obj):
+class NpEncoder(json.JSONEncoder): # noqa: D101
+ def default(self, obj): # noqa: D102
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
- return super(NpEncoder, self).default(obj)
+ return super(NpEncoder, self).default(obj) # noqa: UP008
class generalAIMGenerator:
@@ -34,44 +34,44 @@ class generalAIMGenerator:
:param `**kwargs`: The keyword arguments are used for ...
:ivar arg: This is where we store arg
:vartype arg: str
- """
+ """ # noqa: D205, D400
def __init__(self, output_file):
self.output_file = output_file
self.gdf = None
self.filter = None
- def load_asset_gdf(self, source_file):
+ def load_asset_gdf(self, source_file): # noqa: D102
asset_gdf = gpd.read_file(source_file)
self.gdf = asset_gdf
- def set_asset_gdf(self, asset_gdf):
+ def set_asset_gdf(self, asset_gdf): # noqa: D102
self.gdf = asset_gdf
- def selectAssets(self, filter):
+ def selectAssets(self, filter): # noqa: A002, N802, D102
self.filter = filter
# check if a filter is provided for the assets
if self.filter is not None:
- asset_requested = []
+ asset_requested = [] # noqa: F841
for assets in self.filter.split(','):
if '-' in assets:
asset_low, asset_high = assets.split('-')
- assets_requested += list(
+ assets_requested += list( # noqa: F821
range(int(asset_low), int(asset_high) + 1)
)
else:
assets_requested.append(int(assets))
assets_requested = np.array(assets_requested)
- assets_available = self.gdf.index.values
+ assets_available = self.gdf.index.values # noqa: PD011
assets_to_run = assets_requested[
np.where(np.isin(assets_requested, assets_available))[0]
]
else:
- assets_to_run = self.gdf.index.values
+ assets_to_run = self.gdf.index.values # noqa: PD011
self.gdf = self.gdf.loc[assets_to_run, :]
return assets_to_run
- def createAIM(self, asset_idx, component_type=None):
+ def createAIM(self, asset_idx, component_type=None): # noqa: ARG002, N802, D102
# initialize the AIM file
# if component_type is not None:
# asset_id = component_type+"_"+str(asset_idx)
@@ -79,9 +79,9 @@ def createAIM(self, asset_idx, component_type=None):
# asset_id = str(asset_idx)
asset_id = asset_idx
asset = self.gdf.loc[asset_idx, :]
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=str(asset_id),
location={
'latitude': asset['geometry'].centroid.coords[0][1],
@@ -98,34 +98,34 @@ def createAIM(self, asset_idx, component_type=None):
# AIM_i["GeneralInformation"].update({"assetSubtype":component_type})
return AIM_i
- def dumpAIM(self, AIM_i):
+ def dumpAIM(self, AIM_i): # noqa: N802, N803, D102
# assetSubtype = AIM_i['GeneralInformation'].get("assetSubtype", None)
- componentType = AIM_i['GeneralInformation'].get('type', None)
- outDir = os.path.dirname(self.output_file)
+ componentType = AIM_i['GeneralInformation'].get('type', None) # noqa: N806
+ outDir = os.path.dirname(self.output_file) # noqa: PTH120, N806
if componentType:
- outDir = os.path.join(outDir, componentType)
+ outDir = os.path.join(outDir, componentType) # noqa: PTH118, N806
asset_id = AIM_i['GeneralInformation']['AIM_id']
- AIM_file_name = f'{asset_id}-AIM.json'
- AIM_file_name = os.path.join(outDir, AIM_file_name)
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2, cls=NpEncoder)
return AIM_file_name
-class lineAIMGenerator(generalAIMGenerator):
- def breakDownLongLines(self, delta, tolerance=10e-3):
+class lineAIMGenerator(generalAIMGenerator): # noqa: D101
+ def breakDownLongLines(self, delta, tolerance=10e-3): # noqa: N802, D102
edges = self.gdf
- dropedEdges = []
- newEdges = []
+ dropedEdges = [] # noqa: N806
+ newEdges = [] # noqa: N806
crs = edges.crs
- edgesOrig = edges.copy()
+ edgesOrig = edges.copy() # noqa: N806
# edgesOrig["IDbase"] = edgesOrig["OID"].apply(lambda x: x.split('_')[0])
edgesOrig['IDbase'] = edgesOrig.index
- num_segExistingMap = edgesOrig.groupby('IDbase').count().iloc[:, 0].to_dict()
+ num_segExistingMap = edgesOrig.groupby('IDbase').count().iloc[:, 0].to_dict() # noqa: N806
edges_dict = edges.reset_index().to_crs('epsg:6500')
edges_dict = edges_dict.to_dict(orient='records')
for row_ind in range(len(edges_dict)):
- LS = edges_dict[row_ind]['geometry']
+ LS = edges_dict[row_ind]['geometry'] # noqa: N806
num_seg = int(np.ceil(LS.length / delta))
if num_seg == 1:
continue
@@ -134,14 +134,14 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
[LS.interpolate(distance) for distance in distances[:-1]]
+ [LS.coords[-1]]
)
- LS = shapely.ops.snap(LS, points, tolerance)
+ LS = shapely.ops.snap(LS, points, tolerance) # noqa: N806
with warnings.catch_warnings(): # Suppress the warning of points not on
# LS. Shapely will first project the point onto the line and then split
warnings.simplefilter('ignore')
- splittedLS = shapely.ops.split(LS, points).geoms
- currentEdge = edges_dict[row_ind].copy()
- num_segExisting = num_segExistingMap[currentEdge['id']]
- for sLS_ind, sLS in enumerate(splittedLS):
+ splittedLS = shapely.ops.split(LS, points).geoms # noqa: N806
+ currentEdge = edges_dict[row_ind].copy() # noqa: N806
+ num_segExisting = num_segExistingMap[currentEdge['id']] # noqa: N806, F841
+ for sLS_ind, sLS in enumerate(splittedLS): # noqa: N806
# create new edge
# if sLS_ind ==0:
# newID = currentEdge["id"]
@@ -149,9 +149,9 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
# newID = currentEdge["id"]+"_"+str(num_segExisting)
# num_segExisting +=1
# num_segExistingMap[currentEdge["id"]] += 1
- newID = currentEdge['id']
- newGeom = sLS
- newEdge = currentEdge.copy()
+ newID = currentEdge['id'] # noqa: N806
+ newGeom = sLS # noqa: N806
+ newEdge = currentEdge.copy() # noqa: N806
newEdge.update({'id': newID, 'geometry': newGeom, 'segID': sLS_ind})
newEdges.append(newEdge)
dropedEdges.append(edges_dict[row_ind]['id'])
@@ -159,7 +159,7 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
edges = edges.reset_index() # Convert "id" from index into a column
if len(newEdges) > 0:
edges['segID'] = 0
- newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs)
+ newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs) # noqa: N806
edges = pd.concat([edges, newEdges], ignore_index=True)
edges = edges.sort_values(['id', 'segID'])
edges = (
@@ -170,9 +170,9 @@ def breakDownLongLines(self, delta, tolerance=10e-3):
# self.gdf = edges.reset_index().rename(columns={"index":"AIM_id"})
self.gdf = edges
- def defineConnectivities(
+ def defineConnectivities( # noqa: N802, D102
self,
- AIM_id_prefix=None,
+ AIM_id_prefix=None, # noqa: N803
edges_file_name=None,
nodes_file_name=None,
):
@@ -195,13 +195,13 @@ def defineConnectivities(
# check if first and last are the same
if start == first and end == last:
continue
- elif start == last and end == first:
- newStartID = edges.loc[ind, 'node_end']
- newEndID = edges.loc[ind, 'node_start']
+ elif start == last and end == first: # noqa: RET507
+ newStartID = edges.loc[ind, 'node_end'] # noqa: N806
+ newEndID = edges.loc[ind, 'node_start'] # noqa: N806
edges.loc[ind, 'node_start'] = newStartID
edges.loc[ind, 'node_end'] = newEndID
else:
- print(
+ print( # noqa: T201
ind,
'th row of edges has mismatched start/first, end/last pairs; likely a bug in the momepy.gdf_to_nx function',
)
@@ -218,39 +218,39 @@ def defineConnectivities(
edges['AIM_id'] = edges['AIM_id'].apply(
lambda x: AIM_id_prefix + '_' + str(x)
)
- outDir = os.path.dirname(self.output_file)
+ outDir = os.path.dirname(self.output_file) # noqa: PTH120, N806
if edges_file_name is not None:
edges.to_file(
- os.path.join(outDir, f'{edges_file_name}.geojson'),
+ os.path.join(outDir, f'{edges_file_name}.geojson'), # noqa: PTH118
driver='GeoJSON',
)
if nodes_file_name is not None:
- nodesNeeded = list(
+ nodesNeeded = list( # noqa: N806
set(
- edges['StartNode'].values.tolist()
- + edges['EndNode'].values.tolist()
+ edges['StartNode'].values.tolist() # noqa: PD011
+ + edges['EndNode'].values.tolist() # noqa: PD011
)
)
nodes = nodes.loc[nodesNeeded, :]
nodes = nodes.to_crs(datacrs)[['nodeID', 'geometry']]
nodes.to_file(
- os.path.join(outDir, f'{nodes_file_name}.geojson'),
+ os.path.join(outDir, f'{nodes_file_name}.geojson'), # noqa: PTH118
driver='GeoJSON',
)
self.gdf = edges
-def split_and_select_components(input_config):
- component_dict = dict()
+def split_and_select_components(input_config): # noqa: C901, D103
+ component_dict = dict() # noqa: C408
asset_source_file = input_config['assetSourceFile']
- with open(asset_source_file, encoding='utf-8') as f:
+ with open(asset_source_file, encoding='utf-8') as f: # noqa: PTH123
source_data = json.load(f)
crs = source_data['crs']
- featureList = source_data['features']
- requested_dict = dict()
+ featureList = source_data['features'] # noqa: N806
+ requested_dict = dict() # noqa: C408
for key, value in input_config.items():
if isinstance(value, dict):
- filterString = value.get('filter', None)
+ filterString = value.get('filter', None) # noqa: N806
if filterString is None:
continue
assets_requested = []
@@ -287,27 +287,27 @@ def split_and_select_components(input_config):
return component_dict
-def init_workdir(component_dict, outDir):
+def init_workdir(component_dict, outDir): # noqa: N803, D103
os.chdir(outDir)
for dir_or_file in os.listdir(outDir):
if dir_or_file != 'log.txt':
- if os.path.isdir(dir_or_file):
+ if os.path.isdir(dir_or_file): # noqa: PTH112
shutil.rmtree(dir_or_file)
else:
- os.remove(dir_or_file)
- component_dir = dict()
- for comp in component_dict.keys():
- compDir = posixpath.join(outDir, comp)
- os.mkdir(compDir)
+ os.remove(dir_or_file) # noqa: PTH107
+ component_dir = dict() # noqa: C408
+ for comp in component_dict.keys(): # noqa: SIM118
+ compDir = posixpath.join(outDir, comp) # noqa: N806
+ os.mkdir(compDir) # noqa: PTH102
component_dir.update({comp: compDir})
return component_dir
-def create_asset_files(output_file, asset_type, input_file, doParallel):
+def create_asset_files(output_file, asset_type, input_file, doParallel): # noqa: C901, N803, D103
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -315,18 +315,18 @@ def create_asset_files(output_file, asset_type, input_file, doParallel):
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
- outDir = os.path.dirname(output_file)
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ outDir = os.path.dirname(output_file) # noqa: PTH120, N806
- with open(input_file, encoding='utf-8') as f:
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
input_data = json.load(f)
input_config = input_data['Applications']['Assets'][asset_type][
'ApplicationData'
@@ -334,20 +334,20 @@ def create_asset_files(output_file, asset_type, input_file, doParallel):
# if input_config.get("Roadway", None):
# roadSegLength = float(input_config['Roadway'].get('maxRoadLength_m', "100000"))
component_dict = split_and_select_components(input_config)
- component_dir = init_workdir(component_dict, outDir)
+ component_dir = init_workdir(component_dict, outDir) # noqa: F841
assets_array = []
for component_type, component_data in component_dict.items():
- geom_type = type(component_data['geometry'].values[0])
+ geom_type = type(component_data['geometry'].values[0]) # noqa: PD011
if geom_type in [shapely.Point, shapely.Polygon]:
# if component_type in ["HwyBridge", "HwyTunnel"]:
- AIMgenerator = generalAIMGenerator(output_file)
+ AIMgenerator = generalAIMGenerator(output_file) # noqa: N806
AIMgenerator.set_asset_gdf(component_data)
- selected_Asset_idxs = AIMgenerator.selectAssets(None)
+ selected_Asset_idxs = AIMgenerator.selectAssets(None) # noqa: N806
# elif component_type in ["Roadway"]:
elif geom_type == shapely.LineString:
- AIMgenerator = lineAIMGenerator(output_file)
+ AIMgenerator = lineAIMGenerator(output_file) # noqa: N806
AIMgenerator.set_asset_gdf(component_data)
- selected_Asset_idxs = AIMgenerator.selectAssets(None)
+ selected_Asset_idxs = AIMgenerator.selectAssets(None) # noqa: N806
# AIMgenerator.breakDownLongLines(roadSegLength)
# # AIMgenerator.defineConnectivities(None, "hwy_edges",\
# # "hwy_nodes")
@@ -356,40 +356,40 @@ def create_asset_files(output_file, asset_type, input_file, doParallel):
# selected_Asset_idxs = AIMgenerator.selectAssets(None)
else:
sys.exit(
- (f'The geometry type {geom_type} defined for the')
+ (f'The geometry type {geom_type} defined for the') # noqa: ISC003
+ (f'components {component_type} is not supported in ')
+ (f'the assets {asset_type}')
)
# for each asset...
count = 0
for asset_idx in selected_Asset_idxs:
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- AIM_i = AIMgenerator.createAIM(asset_idx, component_type)
- AIM_file_name = AIMgenerator.dumpAIM(AIM_i)
+ AIM_i = AIMgenerator.createAIM(asset_idx, component_type) # noqa: N806
+ AIM_file_name = AIMgenerator.dumpAIM(AIM_i) # noqa: N806
assets_array.append(
- dict(
+ dict( # noqa: C408
id=AIM_i['GeneralInformation']['AIM_id'], file=AIM_file_name
)
)
count = count + 1
if procID != 0:
# if not P0, write data to an output file with procID in the name, then barrier
- output_file_p = os.path.join(outDir, f'tmp_{procID}.json')
- with open(output_file_p, 'w', encoding='utf-8') as f:
+ output_file_p = os.path.join(outDir, f'tmp_{procID}.json') # noqa: PTH118
+ with open(output_file_p, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=0)
comm.Barrier()
else:
- if runParallel == True:
+ if runParallel == True: # noqa: E712
# if parallel & on P0: barrier so all ranks have written their files above, then loop over the other processors' files: open, load the data, and append
comm.Barrier()
for i in range(1, numP):
- fileToAppend = os.path.join(outDir, f'tmp_{i}.json')
- with open(fileToAppend, encoding='utf-8') as data_file:
+ fileToAppend = os.path.join(outDir, f'tmp_{i}.json') # noqa: PTH118, N806
+ with open(fileToAppend, encoding='utf-8') as data_file: # noqa: PTH123
json_data = data_file.read()
- assetsToAppend = json.loads(json_data)
+ assetsToAppend = json.loads(json_data) # noqa: N806
assets_array += assetsToAppend
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=2, cls=NpEncoder)
# else:
# print(f"The asset_type {asset_type} is not one of Buildings, TransportationNetwork or WaterNetwork, and is currently not supported")
diff --git a/modules/createAIM/JSON_to_AIM/GeoJSON_to_AIM_transport.py b/modules/createAIM/JSON_to_AIM/GeoJSON_to_AIM_transport.py
index d7d5ddd68..2b03397e8 100644
--- a/modules/createAIM/JSON_to_AIM/GeoJSON_to_AIM_transport.py
+++ b/modules/createAIM/JSON_to_AIM/GeoJSON_to_AIM_transport.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -55,17 +55,17 @@
# Break down long roads according to delta
-def breakDownLongEdges(edges, delta, tolerance=10e-3):
- dropedEdges = []
- newEdges = []
+def breakDownLongEdges(edges, delta, tolerance=10e-3): # noqa: N802, D103
+ dropedEdges = [] # noqa: N806
+ newEdges = [] # noqa: N806
crs = edges.crs
- edgesOrig = edges.copy()
+ edgesOrig = edges.copy() # noqa: N806
edgesOrig['IDbase'] = edgesOrig['ID'].apply(lambda x: x.split('_')[0])
- num_segExistingMap = edgesOrig.groupby('IDbase').count()['ID'].to_dict()
+ num_segExistingMap = edgesOrig.groupby('IDbase').count()['ID'].to_dict() # noqa: N806
edges_dict = edges.reset_index().to_crs('epsg:6500')
edges_dict = edges_dict.to_dict(orient='records')
for row_ind in range(len(edges_dict)):
- LS = edges_dict[row_ind]['geometry']
+ LS = edges_dict[row_ind]['geometry'] # noqa: N806
num_seg = int(np.ceil(LS.length / delta))
if num_seg == 1:
continue
@@ -74,24 +74,24 @@ def breakDownLongEdges(edges, delta, tolerance=10e-3):
[LS.interpolate(distance) for distance in distances[:-1]]
+ [LS.boundary.geoms[1]]
)
- LS = shapely.ops.snap(LS, points, tolerance)
- splittedLS = shapely.ops.split(LS, points).geoms
- currentEdge = edges_dict[row_ind].copy()
- num_segExisting = num_segExistingMap[currentEdge['ID'].split('_')[0]]
- for sLS_ind, sLS in enumerate(splittedLS):
+ LS = shapely.ops.snap(LS, points, tolerance) # noqa: N806
+ splittedLS = shapely.ops.split(LS, points).geoms # noqa: N806
+ currentEdge = edges_dict[row_ind].copy() # noqa: N806
+ num_segExisting = num_segExistingMap[currentEdge['ID'].split('_')[0]] # noqa: N806
+ for sLS_ind, sLS in enumerate(splittedLS): # noqa: N806
# create new edge
if sLS_ind == 0:
- newID = currentEdge['ID']
+ newID = currentEdge['ID'] # noqa: N806
else:
- newID = (
+ newID = ( # noqa: N806
currentEdge['ID'].split('_')[0] + '_' + str(num_segExisting + 1)
)
- num_segExisting += 1
+ num_segExisting += 1 # noqa: N806
num_segExistingMap[currentEdge['ID'].split('_')[0]] = (
num_segExistingMap[currentEdge['ID'].split('_')[0]] + 1
)
- newGeom = sLS
- newEdge = currentEdge.copy()
+ newGeom = sLS # noqa: N806
+ newEdge = currentEdge.copy() # noqa: N806
newEdge.update(
{
'ID': newID,
@@ -105,13 +105,13 @@ def breakDownLongEdges(edges, delta, tolerance=10e-3):
dropedEdges.append(row_ind)
edges = edges.drop(dropedEdges)
if len(newEdges) > 0:
- newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs)
+ newEdges = gpd.GeoDataFrame(newEdges, crs='epsg:6500').to_crs(crs) # noqa: N806
edges = pd.concat([edges, newEdges], ignore_index=True)
edges = edges.reset_index(drop=True)
- return edges
+ return edges # noqa: RET504
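The RET504 tag above flags an assignment that is returned on the very next line; returning the reset_index result directly is equivalent. A sketch on a toy frame:

```python
import pandas as pd

def rebuild_bad(edges: pd.DataFrame) -> pd.DataFrame:
    edges = edges.reset_index(drop=True)
    return edges  # RET504: assigned and returned immediately

def rebuild_good(edges: pd.DataFrame) -> pd.DataFrame:
    return edges.reset_index(drop=True)  # same behaviour, no intermediate name

df = pd.DataFrame({'ID': ['a_0', 'a_1']}, index=[5, 9])
print(rebuild_good(df).index.tolist())  # [0, 1]
```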
-def create_asset_files(
+def create_asset_files( # noqa: C901, D103, PLR0915
output_file,
asset_source_road,
asset_source_bridge,
@@ -119,16 +119,16 @@ def create_asset_files(
bridge_filter,
tunnel_filter,
road_filter,
- doParallel,
- roadSegLength,
+ doParallel, # noqa: N803
+ roadSegLength, # noqa: N803
):
# these imports are here to save time when the app is called without
# the -getRV flag
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -136,18 +136,18 @@ def create_asset_files(
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
# Get the output directory; it may not always be in the results folder if multiple assets are used
- outDir = os.path.dirname(output_file)
+ outDir = os.path.dirname(output_file) # noqa: PTH120, N806
# check if a filter is provided for bridges
if bridge_filter is not None:
@@ -182,49 +182,49 @@ def create_asset_files(
# load the GeoJSON file with the asset information
if asset_source_road is not None:
- roadsGDF = gpd.read_file(asset_source_road)
+ roadsGDF = gpd.read_file(asset_source_road) # noqa: N806
datacrs = roadsGDF.crs
else:
- roadsGDF = gpd.GeoDataFrame.from_dict({})
+ roadsGDF = gpd.GeoDataFrame.from_dict({}) # noqa: N806
if asset_source_bridge is not None:
- bridgesGDF = gpd.read_file(asset_source_bridge)
+ bridgesGDF = gpd.read_file(asset_source_bridge) # noqa: N806
else:
- bridgesGDF = gpd.GeoDataFrame.from_dict({})
+ bridgesGDF = gpd.GeoDataFrame.from_dict({}) # noqa: N806
if asset_source_tunnel is not None:
- tunnelsGDF = gpd.read_file(asset_source_tunnel)
+ tunnelsGDF = gpd.read_file(asset_source_tunnel) # noqa: N806
else:
- tunnelsGDF = gpd.GeoDataFrame.from_dict({})
+ tunnelsGDF = gpd.GeoDataFrame.from_dict({}) # noqa: N806
# if there is a filter, then pull out only the required bridges
if bridge_filter is not None:
- assets_available = bridgesGDF.index.values
+ assets_available = bridgesGDF.index.values # noqa: PD011
bridges_to_run = bridges_requested[
np.where(np.isin(bridges_requested, assets_available))[0]
]
selected_bridges = bridgesGDF.loc[bridges_to_run]
else:
selected_bridges = bridgesGDF
- bridges_to_run = bridgesGDF.index.values
+ bridges_to_run = bridgesGDF.index.values # noqa: PD011
# if there is a filter, then pull out only the required tunnels
if tunnel_filter is not None:
- assets_available = tunnelsGDF.index.values
+ assets_available = tunnelsGDF.index.values # noqa: PD011
tunnels_to_run = tunnels_requested[
np.where(np.isin(tunnels_requested, assets_available))[0]
]
selected_tunnels = tunnelsGDF.loc[tunnels_to_run]
else:
selected_tunnels = tunnelsGDF
- tunnels_to_run = tunnelsGDF.index.values
+ tunnels_to_run = tunnelsGDF.index.values # noqa: PD011
# if there is a filter, then pull out only the required roads
if road_filter is not None:
- assets_available = roadsGDF.index.values
+ assets_available = roadsGDF.index.values # noqa: PD011
roads_to_run = roads_requested[
np.where(np.isin(roads_requested, assets_available))[0]
]
selected_roads = roadsGDF.loc[roads_to_run]
else:
selected_roads = roadsGDF
- roads_to_run = roadsGDF.index.values
+ roads_to_run = roadsGDF.index.values # noqa: PD011
if len(selected_roads) > 0:
# Break down road network
@@ -246,17 +246,17 @@ def create_asset_files(
# check if first and last are the same
if start == first and end == last:
continue
- elif start == last and end == first:
- newStartID = edges.loc[ind, 'node_end']
- newEndID = edges.loc[ind, 'node_start']
+ elif start == last and end == first: # noqa: RET507
+ newStartID = edges.loc[ind, 'node_end'] # noqa: N806
+ newEndID = edges.loc[ind, 'node_start'] # noqa: N806
edges.loc[ind, 'node_start'] = newStartID
edges.loc[ind, 'node_end'] = newEndID
else:
- print(
+ print( # noqa: T201
ind,
'th row of edges has mismatched start/first, end/last pairs; likely a bug in the momepy.gdf_to_nx function',
)
- locationGS = gpd.GeoSeries(
+ locationGS = gpd.GeoSeries( # noqa: N806
edges['geometry'].apply(lambda x: x.centroid), crs=edges.crs
).to_crs(datacrs)
edges = (
@@ -269,19 +269,19 @@ def create_asset_files(
edges = edges.reset_index().rename(columns={'index': 'AIM_id'})
edges['AIM_id'] = edges['AIM_id'].apply(lambda x: 'r' + str(x))
edges.to_file(
- os.path.join(outDir, 'roadNetworkEdgesSelected.geojson'),
+ os.path.join(outDir, 'roadNetworkEdgesSelected.geojson'), # noqa: PTH118
driver='GeoJSON',
)
- nodesNeeded = list(
+ nodesNeeded = list( # noqa: N806
set(
- edges['start_node'].values.tolist()
- + edges['end_node'].values.tolist()
+ edges['start_node'].values.tolist() # noqa: PD011
+ + edges['end_node'].values.tolist() # noqa: PD011
)
)
nodes = nodes.loc[nodesNeeded, :]
nodes = nodes.to_crs(datacrs)[['nodeID', 'geometry']]
nodes.to_file(
- os.path.join(outDir, 'roadNetworkNodesSelected.geojson'),
+ os.path.join(outDir, 'roadNetworkNodesSelected.geojson'), # noqa: PTH118
driver='GeoJSON',
)
else:
@@ -292,13 +292,13 @@ def create_asset_files(
assets_array = []
for ind, asset in selected_bridges.iterrows():
asset_id = 'b' + str(bridges_to_run[ind])
- ind += 1
- if runParallel == False or (count % numP) == procID:
+ ind += 1 # noqa: PLW2901
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
# locationNodeID = str(asset["location"])
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': asset['geometry'].centroid.coords[0][1],
@@ -311,27 +311,27 @@ def create_asset_files(
AIM_i['GeneralInformation'].update(asset)
# AIM_i["GeneralInformation"].update({"locationNode":locationNodeID})
AIM_i['GeneralInformation'].update({'assetSubtype': 'hwyBridge'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
ind = 0
for ind, asset in selected_tunnels.iterrows():
asset_id = 't' + str(tunnels_to_run[ind])
- ind += 1
- if runParallel == False or (count % numP) == procID:
+ ind += 1 # noqa: PLW2901
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
# locationNodeID = str(asset["location"])
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': asset['geometry'].centroid.coords[0][1],
@@ -344,14 +344,14 @@ def create_asset_files(
AIM_i['GeneralInformation'].update(asset)
# AIM_i["GeneralInformation"].update({"locationNode":locationNodeID})
AIM_i['GeneralInformation'].update({'assetSubtype': 'hwyTunnel'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
@@ -359,11 +359,11 @@ def create_asset_files(
for row_ind in edges.index:
asset_id = 'r' + str(row_ind)
ind += 1
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': edges.loc[row_ind, 'location_lat'],
@@ -385,40 +385,40 @@ def create_asset_files(
}
AIM_i['GeneralInformation'].update({'geometry': str(geom)})
AIM_i['GeneralInformation'].update({'assetSubtype': 'roadway'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
if procID != 0:
# if not P0, write data to an output file with procID in the name, then barrier
- output_file = os.path.join(outDir, f'tmp_{procID}.json')
+ output_file = os.path.join(outDir, f'tmp_{procID}.json') # noqa: PTH118
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=0)
comm.Barrier()
else:
- if runParallel == True:
+ if runParallel == True: # noqa: E712
# if parallel & on P0: barrier so all ranks have written their files above, then loop over the other processors' files: open, load the data, and append
comm.Barrier()
for i in range(1, numP):
- fileToAppend = os.path.join(outDir, f'tmp_{i}.json')
- with open(fileToAppend, encoding='utf-8') as data_file:
+ fileToAppend = os.path.join(outDir, f'tmp_{i}.json') # noqa: PTH118, N806
+ with open(fileToAppend, encoding='utf-8') as data_file: # noqa: PTH123
json_data = data_file.read()
- assetsToAppend = json.loads(json_data)
+ assetsToAppend = json.loads(json_data) # noqa: N806
assets_array += assetsToAppend
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=2)
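Every AIM writer in this file reads latitude from `centroid.coords[0][1]` and longitude from `coords[0][0]`; that index order is load-bearing because Shapely coordinates are (x, y), i.e. (longitude, latitude) for geographic data. A quick sketch with a made-up footprint:

```python
import shapely

# toy footprint in EPSG:4326-style (lon, lat) coordinates
footprint = shapely.Polygon([(-122.3, 37.8), (-122.2, 37.8), (-122.2, 37.9)])
x, y = footprint.centroid.coords[0]  # Shapely order is (x, y) == (lon, lat)

location = {'latitude': y, 'longitude': x}
print(location)
```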
diff --git a/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py b/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py
index a97f5462d..69d2d3265 100644
--- a/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py
+++ b/modules/createAIM/JSON_to_AIM/JSON_to_AIM_transport.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -57,10 +57,10 @@
# Remove the nodes with 2 neighbours
# https://stackoverflow.com/questions/56380053/combine-edges-when-node-degree-is-n-in-networkx
# Needs parallelization
-def remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph):
+def remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph): # noqa: N802, N803, D103
# For each of those nodes
- removedID_list = [] # nodes with two neighbors. Removed from graph
- skippedID_list = [] # nodes involved in loops. Skipped removing.
+ removedID_list = [] # nodes with two neighbors. Removed from graph # noqa: N806
+ skippedID_list = [] # nodes involved in loops. Skipped removing. # noqa: N806
error_list = [] # nodes that hit an error; left in the graph as is.
for i in range(len(nodesID_to_remove)):
nodeid = nodesID_to_remove[i]
@@ -70,10 +70,10 @@ def remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph):
if (
edge1.shape[0] == 1
and edge2.shape[0] == 1
- and edge1['node_start'].values[0] != edge2['node_end'].values[0]
+ and edge1['node_start'].values[0] != edge2['node_end'].values[0] # noqa: PD011
):
pass # Do things after continue
- elif edge1.shape[0] == 0 and edge2.shape[0] == 2:
+ elif edge1.shape[0] == 0 and edge2.shape[0] == 2: # noqa: PLR2004
ns = edges.loc[edge2.index[0], 'node_start']
ne = edges.loc[edge2.index[0], 'node_end']
edges.loc[edge2.index[0], 'node_start'] = ne
@@ -84,7 +84,7 @@ def remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph):
].reverse()
edge1 = edges[edges['node_end'] == nodeid]
edge2 = edges[edges['node_start'] == nodeid]
- elif edge1.shape[0] == 2 and edge2.shape[0] == 0:
+ elif edge1.shape[0] == 2 and edge2.shape[0] == 0: # noqa: PLR2004
ns = edges.loc[edge1.index[1], 'node_start']
ne = edges.loc[edge1.index[1], 'node_end']
edges.loc[edge1.index[1], 'node_start'] = ne
@@ -100,40 +100,40 @@ def remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph):
continue
try:
removedID_list.append(nodeid)
- newLineCoords = (
+ newLineCoords = ( # noqa: N806
list(
- edge1['geometry'].values[0].coords
+ edge1['geometry'].values[0].coords # noqa: PD011
)
+ list(
- edge2['geometry'].values[0].coords[1:]
+ edge2['geometry'].values[0].coords[1:] # noqa: PD011
)
)
# newLineCoords.append(edge2["geometry"].values[0].coords[1:])
edges.loc[edge1.index, 'geometry'] = shapely.LineString(newLineCoords)
- edges.loc[edge1.index, 'node_end'] = edge2['node_end'].values[0]
- edges.drop(edge2.index, axis=0, inplace=True)
- newEdge = list(graph.neighbors(node))
+ edges.loc[edge1.index, 'node_end'] = edge2['node_end'].values[0] # noqa: PD011
+ edges.drop(edge2.index, axis=0, inplace=True) # noqa: PD002
+ newEdge = list(graph.neighbors(node)) # noqa: N806
graph.add_edge(newEdge[0], newEdge[1])
# And delete the node
graph.remove_node(node)
- except:
+ except: # noqa: E722
error_list.append(nodeid)
return edges
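remove2neibourEdges merges each degree-2 node's two incident edges into one, setting aside loops and error cases. A compact networkx sketch of the core merge, with a simplified guard standing in for the original's loop handling (geometry bookkeeping omitted):

```python
import networkx as nx

def merge_degree2_nodes(graph: nx.Graph) -> nx.Graph:
    """Remove degree-2 nodes by joining their two incident edges."""
    g = graph.copy()
    for node in [n for n in g.nodes if g.degree(n) == 2]:
        if g.degree(node) != 2:  # degree may have changed by an earlier merge
            continue
        neigh = list(g.neighbors(node))
        if len(neigh) != 2:  # self-loop; skipped, as the original skips loops
            continue
        left, right = neigh
        g.add_edge(left, right)
        g.remove_node(node)
    return g

g = nx.path_graph(5)  # 0-1-2-3-4
print(sorted(merge_degree2_nodes(g).edges()))  # [(0, 4)]
```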
# Break down long roads according to delta
-def breakDownLongEdges(edges, delta, roadDF, nodesDF, tolerance=10e-3):
- dropedEdges = []
- newEdges = []
+def breakDownLongEdges(edges, delta, roadDF, nodesDF, tolerance=10e-3): # noqa: N802, N803, D103
+ dropedEdges = [] # noqa: N806
+ newEdges = [] # noqa: N806
crs = edges.crs
edges_dict = edges.reset_index()
nodes_dict = nodesDF.reset_index()
edges_dict = edges.to_dict(orient='records')
nodes_dict = nodesDF.to_dict(orient='records')
roadDF['IDbase'] = roadDF['ID'].apply(lambda x: x.split('_')[0])
- num_segExistingMap = roadDF.groupby('IDbase').count()['ID'].to_dict()
+ num_segExistingMap = roadDF.groupby('IDbase').count()['ID'].to_dict() # noqa: N806
for row_ind in range(len(edges_dict)):
- LS = edges_dict[row_ind]['geometry']
+ LS = edges_dict[row_ind]['geometry'] # noqa: N806
num_seg = int(np.ceil(LS.length / delta))
if num_seg == 1:
continue
@@ -142,47 +142,47 @@ def breakDownLongEdges(edges, delta, roadDF, nodesDF, tolerance=10e-3):
[LS.interpolate(distance) for distance in distances[:-1]]
+ [LS.boundary.geoms[1]]
)
- LS = shapely.ops.snap(LS, points, tolerance)
- splittedLS = shapely.ops.split(LS, points).geoms
- currentEdge = edges_dict[row_ind].copy()
- num_segExisting = num_segExistingMap[currentEdge['ID'].split('_')[0]]
- newNodes = []
- currentNodesNum = len(nodes_dict) - 1 # nodeID starts with 0
+ LS = shapely.ops.snap(LS, points, tolerance) # noqa: N806
+ splittedLS = shapely.ops.split(LS, points).geoms # noqa: N806
+ currentEdge = edges_dict[row_ind].copy() # noqa: N806
+ num_segExisting = num_segExistingMap[currentEdge['ID'].split('_')[0]] # noqa: N806
+ newNodes = [] # noqa: N806
+ currentNodesNum = len(nodes_dict) - 1 # nodeID starts with 0 # noqa: N806
for pt in points.geoms:
- newNode_dict = {
+ newNode_dict = { # noqa: N806
'nodeID': currentNodesNum,
'oldNodeID': np.nan,
'geometry': pt,
}
- currentNodesNum += 1
+ currentNodesNum += 1 # noqa: N806
newNodes.append(newNode_dict)
- newNodes = newNodes[
+ newNodes = newNodes[ # noqa: N806
1:-1
] # The first and last points already exist in the nodes DF; delete them
nodes_dict = nodes_dict + newNodes
- for sLS_ind, sLS in enumerate(splittedLS):
+ for sLS_ind, sLS in enumerate(splittedLS): # noqa: N806
# create new edge
if sLS_ind == 0:
- newID = currentEdge['ID']
+ newID = currentEdge['ID'] # noqa: N806
else:
- newID = (
+ newID = ( # noqa: N806
currentEdge['ID'].split('_')[0] + '_' + str(num_segExisting + 1)
)
- num_segExisting += 1
+ num_segExisting += 1 # noqa: N806
num_segExistingMap[currentEdge['ID'].split('_')[0]] = (
num_segExistingMap[currentEdge['ID'].split('_')[0]] + 1
)
- newGeom = sLS
+ newGeom = sLS # noqa: N806
if sLS_ind == 0:
- newEdge_ns = currentEdge['node_start']
- newEdge_ne = newNodes[sLS_ind]['nodeID']
+ newEdge_ns = currentEdge['node_start'] # noqa: N806
+ newEdge_ne = newNodes[sLS_ind]['nodeID'] # noqa: N806
elif sLS_ind < len(splittedLS) - 1:
- newEdge_ns = newNodes[sLS_ind - 1]['nodeID']
- newEdge_ne = newNodes[sLS_ind]['nodeID']
+ newEdge_ns = newNodes[sLS_ind - 1]['nodeID'] # noqa: N806
+ newEdge_ne = newNodes[sLS_ind]['nodeID'] # noqa: N806
else:
- newEdge_ns = newNodes[sLS_ind - 1]['nodeID']
- newEdge_ne = currentEdge['node_end']
- newEdge = currentEdge.copy()
+ newEdge_ns = newNodes[sLS_ind - 1]['nodeID'] # noqa: N806
+ newEdge_ne = currentEdge['node_end'] # noqa: N806
+ newEdge = currentEdge.copy() # noqa: N806
newEdge.update(
{
'ID': newID,
@@ -198,29 +198,29 @@ def breakDownLongEdges(edges, delta, roadDF, nodesDF, tolerance=10e-3):
dropedEdges.append(row_ind)
edges = edges.drop(dropedEdges)
if len(newEdges) > 0:
- newEdges = gpd.GeoDataFrame(newEdges, crs=crs)
+ newEdges = gpd.GeoDataFrame(newEdges, crs=crs) # noqa: N806
edges = pd.concat([edges, newEdges], ignore_index=True)
edges = edges.reset_index(drop=True)
- nodesDF = gpd.GeoDataFrame(nodes_dict, crs=crs)
+ nodesDF = gpd.GeoDataFrame(nodes_dict, crs=crs) # noqa: N806
return edges, nodesDF
-def create_asset_files(
+def create_asset_files( # noqa: C901, D103, PLR0915
output_file,
asset_source_file,
bridge_filter,
tunnel_filter,
road_filter,
- doParallel,
- roadSegLength,
+ doParallel, # noqa: N803
+ roadSegLength, # noqa: N803
):
# these imports are here to save time when the app is called without
# the -getRV flag
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -228,18 +228,18 @@ def create_asset_files(
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
# Get the out dir, may not always be in the results folder if multiple assets are used
- outDir = os.path.dirname(output_file)
+ outDir = os.path.dirname(output_file) # noqa: PTH120, N806
# check if a filter is provided for bridges
if bridge_filter is not None:
@@ -273,7 +273,7 @@ def create_asset_files(
roads_requested = np.array(roads_requested)
# load the JSON file with the asset information
- with open(asset_source_file, encoding='utf-8') as sourceFile:
+ with open(asset_source_file, encoding='utf-8') as sourceFile: # noqa: PTH123, N806
assets_dict = json.load(sourceFile)
bridges_array = assets_dict.get('hwy_bridges', None)
@@ -281,13 +281,13 @@ def create_asset_files(
roads_array = assets_dict.get('roadways', None)
nodes_dict = assets_dict.get('nodes', None)
if nodes_dict is None:
- print(
+ print( # noqa: T201
"JSON_to_AIM_tranportation ERROR: A key of 'nodes' is not found in the asset file: "
+ asset_source_file
)
return
if tunnels_array is None and bridges_array is None and roads_array is None:
- print(
+ print( # noqa: T201
"JSON_to_AIM_tranportation ERROR: None of 'hwy_bridges', 'hwy_tunnels', nor 'roadways' is not found in the asset file: "
+ asset_source_file
)
@@ -302,7 +302,7 @@ def create_asset_files(
np.where(np.isin(bridges_requested, assets_available))[0]
]
for i in bridges_to_run:
- selected_bridges.append(bridges_array[i])
+ selected_bridges.append(bridges_array[i]) # noqa: PERF401
else:
selected_bridges = bridges_array
bridges_to_run = list(range(len(bridges_array)))
@@ -314,7 +314,7 @@ def create_asset_files(
np.where(np.isin(tunnels_requested, assets_available))[0]
]
for i in tunnels_to_run:
- selected_tunnels.append(tunnels_array[i])
+ selected_tunnels.append(tunnels_array[i]) # noqa: PERF401
else:
selected_tunnels = tunnels_array
tunnels_to_run = list(range(len(tunnels_array)))
@@ -326,7 +326,7 @@ def create_asset_files(
np.where(np.isin(roads_requested, assets_available))[0]
]
for i in roads_to_run:
- selected_roads.append(roads_array[i])
+ selected_roads.append(roads_array[i]) # noqa: PERF401
else:
selected_roads = roads_array
roads_to_run = list(range(len(roads_array)))
@@ -334,16 +334,16 @@ def create_asset_files(
# Reconstruct road network
datacrs = assets_dict.get('crs', None)
if datacrs is None:
- print(
+ print( # noqa: T201
"JSON_to_AIM_tranportation WARNING: 'crs' is not found in the asset file: "
+ asset_source_file
)
- print('The CRS epsg:4326 is used by default')
+ print('The CRS epsg:4326 is used by default') # noqa: T201
datacrs = 'epsg:4326'
if len(selected_roads) > 0:
- roadDF = pd.DataFrame.from_dict(selected_roads)
- LineStringList = []
+ roadDF = pd.DataFrame.from_dict(selected_roads) # noqa: N806
+ LineStringList = [] # noqa: N806
for ind in roadDF.index:
start_node = nodes_dict[str(roadDF.loc[ind, 'start_node'])]
end_node = nodes_dict[str(roadDF.loc[ind, 'end_node'])]
@@ -356,8 +356,8 @@ def create_asset_files(
)
)
roadDF['geometry'] = LineStringList
- roadDF = roadDF[['ID', 'roadType', 'lanes', 'maxMPH', 'geometry']]
- roadGDF = gpd.GeoDataFrame(roadDF, geometry='geometry', crs=datacrs)
+ roadDF = roadDF[['ID', 'roadType', 'lanes', 'maxMPH', 'geometry']] # noqa: N806
+ roadGDF = gpd.GeoDataFrame(roadDF, geometry='geometry', crs=datacrs) # noqa: N806
graph = momepy.gdf_to_nx(roadGDF.to_crs('epsg:6500'), approach='primal')
with warnings.catch_warnings(): # Suppress the warning of disconnected components in the graph
warnings.simplefilter('ignore')
@@ -366,7 +366,7 @@ def create_asset_files(
)
        # One-way vs. two-way is not considered in D&L; remove duplicated edges
edges = edges[
- edges.duplicated(['node_start', 'node_end'], keep='first') == False
+ edges.duplicated(['node_start', 'node_end'], keep='first') == False # noqa: E712
]
edges = edges.reset_index(drop=True).drop('mm_len', axis=1)
        # Some edges have start_node as the last point in the geometry and end_node as the first point; check and reorder
@@ -378,32 +378,32 @@ def create_asset_files(
# check if first and last are the same
if start == first and end == last:
continue
- elif start == last and end == first:
- newStartID = edges.loc[ind, 'node_end']
- newEndID = edges.loc[ind, 'node_start']
+ elif start == last and end == first: # noqa: RET507
+ newStartID = edges.loc[ind, 'node_end'] # noqa: N806
+ newEndID = edges.loc[ind, 'node_start'] # noqa: N806
edges.loc[ind, 'node_start'] = newStartID
edges.loc[ind, 'node_end'] = newEndID
else:
- print(
+ print( # noqa: T201
ind,
                'th row of edges has wrong start/first, end/last pairs, likely a bug in the momepy.gdf_to_nx function',
)
- nodesID_to_remove = [
+ nodesID_to_remove = [ # noqa: N806
i
for i, n in enumerate(graph.nodes)
- if len(list(graph.neighbors(n))) == 2
+ if len(list(graph.neighbors(n))) == 2 # noqa: PLR2004
]
nodes_to_remove = [
n
for i, n in enumerate(graph.nodes)
- if len(list(graph.neighbors(n))) == 2
+ if len(list(graph.neighbors(n))) == 2 # noqa: PLR2004
]
edges = remove2neibourEdges(nodesID_to_remove, nodes_to_remove, edges, graph)
- remainingNodesOldID = list(
+ remainingNodesOldID = list( # noqa: N806
set(
- edges['node_start'].values.tolist()
- + edges['node_end'].values.tolist()
+ edges['node_start'].values.tolist() # noqa: PD011
+ + edges['node_end'].values.tolist() # noqa: PD011
)
)
nodes = nodes.loc[remainingNodesOldID, :].sort_index()
@@ -434,7 +434,7 @@ def create_asset_files(
)
edges, nodes = breakDownLongEdges(edges, roadSegLength, roadDF, nodes)
- locationGS = gpd.GeoSeries(
+ locationGS = gpd.GeoSeries( # noqa: N806
edges['geometry'].apply(lambda x: x.centroid), crs=edges.crs
).to_crs(datacrs)
edges = edges.to_crs(datacrs).rename(
@@ -446,20 +446,20 @@ def create_asset_files(
edges = edges.reset_index().rename(columns={'index': 'AIM_id'})
edges['AIM_id'] = edges['AIM_id'].apply(lambda x: 'r' + str(x))
edges.to_file(
- os.path.join(outDir, 'roadNetworkEdgesSelected.geojson'),
+ os.path.join(outDir, 'roadNetworkEdgesSelected.geojson'), # noqa: PTH118
driver='GeoJSON',
)
- nodesNeeded = list(
+ nodesNeeded = list( # noqa: N806
set(
- edges['start_node'].values.tolist()
- + edges['end_node'].values.tolist()
+ edges['start_node'].values.tolist() # noqa: PD011
+ + edges['end_node'].values.tolist() # noqa: PD011
)
)
nodes = nodes.loc[nodesNeeded, :]
nodes = nodes.to_crs(datacrs)[['nodeID', 'geometry']]
nodes.to_file(
- os.path.join(outDir, 'roadNetworkNodesSelected.geojson'),
+ os.path.join(outDir, 'roadNetworkNodesSelected.geojson'), # noqa: PTH118
driver='GeoJSON',
)
else:
@@ -470,12 +470,12 @@ def create_asset_files(
for asset in selected_bridges:
asset_id = 'b' + str(bridges_to_run[ind])
ind += 1
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- locationNodeID = str(asset['location'])
- AIM_i = {
+ locationNodeID = str(asset['location']) # noqa: N806
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': nodes_dict[locationNodeID]['lat'],
@@ -488,14 +488,14 @@ def create_asset_files(
AIM_i['GeneralInformation'].update(asset)
AIM_i['GeneralInformation'].update({'locationNode': locationNodeID})
AIM_i['GeneralInformation'].update({'assetSubtype': 'hwy_bridge'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
@@ -503,12 +503,12 @@ def create_asset_files(
for asset in selected_tunnels:
asset_id = 't' + str(tunnels_to_run[ind])
ind += 1
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- locationNodeID = str(asset['location'])
- AIM_i = {
+ locationNodeID = str(asset['location']) # noqa: N806
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': nodes_dict[locationNodeID]['lat'],
@@ -521,14 +521,14 @@ def create_asset_files(
AIM_i['GeneralInformation'].update(asset)
AIM_i['GeneralInformation'].update({'locationNode': locationNodeID})
AIM_i['GeneralInformation'].update({'assetSubtype': 'hwy_tunnel'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
@@ -536,11 +536,11 @@ def create_asset_files(
for row_ind in edges.index:
asset_id = 'r' + str(row_ind)
ind += 1
- if runParallel == False or (count % numP) == procID:
+ if runParallel == False or (count % numP) == procID: # noqa: E712
# initialize the AIM file
- AIM_i = {
+ AIM_i = { # noqa: N806
'RandomVariables': [],
- 'GeneralInformation': dict(
+ 'GeneralInformation': dict( # noqa: C408
AIM_id=asset_id,
location={
'latitude': edges.loc[row_ind, 'location_lat'],
@@ -562,40 +562,40 @@ def create_asset_files(
}
AIM_i['GeneralInformation'].update({'geometry': str(geom)})
AIM_i['GeneralInformation'].update({'assetSubtype': 'roadway'})
- AIM_file_name = f'{asset_id}-AIM.json'
+ AIM_file_name = f'{asset_id}-AIM.json' # noqa: N806
- AIM_file_name = os.path.join(outDir, AIM_file_name)
+ AIM_file_name = os.path.join(outDir, AIM_file_name) # noqa: PTH118, N806
- with open(AIM_file_name, 'w', encoding='utf-8') as f:
+ with open(AIM_file_name, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(AIM_i, f, indent=2)
- assets_array.append(dict(id=str(asset_id), file=AIM_file_name))
+ assets_array.append(dict(id=str(asset_id), file=AIM_file_name)) # noqa: C408
count = count + 1
if procID != 0:
# if not P0, write data to output file with procID in name and barrier
- output_file = os.path.join(outDir, f'tmp_{procID}.json')
+ output_file = os.path.join(outDir, f'tmp_{procID}.json') # noqa: PTH118
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=0)
comm.Barrier()
else:
- if runParallel == True:
+ if runParallel == True: # noqa: E712
        # if parallel & P0, barrier so all files above are written, then loop over the other processors' files: open, load data, and append
comm.Barrier()
for i in range(1, numP):
- fileToAppend = os.path.join(outDir, f'tmp_{i}.json')
- with open(fileToAppend, encoding='utf-8') as data_file:
+ fileToAppend = os.path.join(outDir, f'tmp_{i}.json') # noqa: PTH118, N806
+ with open(fileToAppend, encoding='utf-8') as data_file: # noqa: PTH123
json_data = data_file.read()
- assetsToAppend = json.loads(json_data)
+ assetsToAppend = json.loads(json_data) # noqa: N806
assets_array += assetsToAppend
- with open(output_file, 'w', encoding='utf-8') as f:
+ with open(output_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(assets_array, f, indent=2)
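
The hunks above all encode the same round-robin MPI pattern: each rank claims the assets where `count % numP == procID`, non-root ranks dump their share to `tmp_<procID>.json`, and rank 0 merges everything after a barrier. A minimal sketch of that pattern, with illustrative names and a serial fallback when mpi4py is missing:

```python
# Sketch of the round-robin scatter/merge used above (names are illustrative).
import json
import os


def run_round_robin(assets, out_dir, process_asset):
    try:
        from mpi4py import MPI  # optional dependency, mirrors the guarded import
        comm = MPI.COMM_WORLD
        rank, size = comm.Get_rank(), comm.Get_size()
    except ImportError:
        comm, rank, size = None, 0, 1

    # Each rank handles every size-th asset (count % numP == procID above).
    mine = [process_asset(a) for i, a in enumerate(assets) if i % size == rank]

    if comm is None or size < 2:
        return mine
    if rank != 0:
        # Non-root ranks write a temp file, then wait at the barrier.
        with open(os.path.join(out_dir, f'tmp_{rank}.json'), 'w') as f:
            json.dump(mine, f)
        comm.Barrier()
        return None
    # Rank 0 waits until every temp file is written, then appends them in order.
    comm.Barrier()
    for i in range(1, size):
        with open(os.path.join(out_dir, f'tmp_{i}.json')) as f:
            mine += json.load(f)
    return mine
```
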
diff --git a/modules/createEDP/simpleEDP/simpleEDP.py b/modules/createEDP/simpleEDP/simpleEDP.py
index 2541a20b7..4ca06bcdc 100644
--- a/modules/createEDP/simpleEDP/simpleEDP.py
+++ b/modules/createEDP/simpleEDP/simpleEDP.py
@@ -1,8 +1,8 @@
-import sys
+import sys # noqa: INP001, D100
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -10,15 +10,15 @@
import json
-def write_RV(AIM_input_path, EDP_input_path, EDP_type):
+def write_RV(AIM_input_path, EDP_input_path, EDP_type): # noqa: N802, N803, D103
# load the AIM file
- with open(AIM_input_path, encoding='utf-8') as f:
- AIM_in = json.load(f)
+ with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ AIM_in = json.load(f) # noqa: N806
- EDP_list = []
- if 'EDP' in AIM_in.keys():
+ EDP_list = [] # noqa: N806
+ if 'EDP' in AIM_in.keys(): # noqa: SIM118
for edp in AIM_in['EDP']:
- EDP_list.append(
+ EDP_list.append( # noqa: PERF401
{
'type': edp['type'],
'cline': edp.get('cline', '1'),
@@ -45,7 +45,7 @@ def write_RV(AIM_input_path, EDP_input_path, EDP_type):
}
)
- EDP_json = {
+ EDP_json = { # noqa: N806
'RandomVariables': [],
'total_number_edp': len(EDP_list),
'EngineeringDemandParameters': [
@@ -53,11 +53,11 @@ def write_RV(AIM_input_path, EDP_input_path, EDP_type):
],
}
- with open(EDP_input_path, 'w') as f:
+ with open(EDP_input_path, 'w') as f: # noqa: PTH123
json.dump(EDP_json, f, indent=2)
-def create_EDP(AIM_input_path, EDP_input_path, EDP_type):
+def create_EDP(AIM_input_path, EDP_input_path, EDP_type): # noqa: ARG001, N802, N803, D103
pass
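
The `PERF401` suppression above keeps the original append-in-loop. For reference, the comprehension form Ruff points at looks like this (a sketch over an assumed AIM shape, not a proposed change):

```python
# Comprehension form of the EDP_list build (sketch; the AIM shape is assumed).
AIM_in = {'EDP': [{'type': 'PID', 'cline': '1'}, {'type': 'PFA'}]}
EDP_list = [
    {'type': edp['type'], 'cline': edp.get('cline', '1')}
    for edp in AIM_in.get('EDP', [])
]
print(EDP_list)  # [{'type': 'PID', 'cline': '1'}, {'type': 'PFA', 'cline': '1'}]
```
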
diff --git a/modules/createEDP/surrogateEDP/surrogateEDP.py b/modules/createEDP/surrogateEDP/surrogateEDP.py
index 1a6988c6d..190f950cd 100644
--- a/modules/createEDP/surrogateEDP/surrogateEDP.py
+++ b/modules/createEDP/surrogateEDP/surrogateEDP.py
@@ -1,9 +1,9 @@
-import os
+import os # noqa: INP001, D100
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -11,10 +11,10 @@
import json
-def write_RV(AIM_input_path, EDP_input_path, EDP_type):
+def write_RV(AIM_input_path, EDP_input_path, EDP_type): # noqa: ARG001, N802, N803, D103
# load the AIM file
- with open(AIM_input_path, encoding='utf-8') as f:
- root_AIM = json.load(f)
+ with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
#
# Is this the correct application
@@ -24,29 +24,29 @@ def write_RV(AIM_input_path, EDP_input_path, EDP_type):
root_AIM['Applications']['Modeling']['Application']
!= 'SurrogateGPBuildingModel'
):
- with open('../workflow.err', 'w') as f:
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write(
                'Do not select [None] in the EDP tab. [None] is used only when using a pre-trained surrogate, i.e., when [Surrogate] is selected in the SIM tab.'
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
#
# Get EDP info from surrogate model file
#
- print('General Information tab is ignored')
- root_SAM = root_AIM['Applications']['Modeling']
+ print('General Information tab is ignored') # noqa: T201
+ root_SAM = root_AIM['Applications']['Modeling'] # noqa: N806
- surrogate_path = os.path.join(
+ surrogate_path = os.path.join( # noqa: PTH118
root_SAM['ApplicationData']['MS_Path'],
root_SAM['ApplicationData']['mainScript'],
)
- print(surrogate_path)
+ print(surrogate_path) # noqa: T201
- with open(surrogate_path, encoding='utf-8') as f:
+ with open(surrogate_path, encoding='utf-8') as f: # noqa: PTH123
surrogate_model = json.load(f)
- root_EDP = surrogate_model['EDP']
+ root_EDP = surrogate_model['EDP'] # noqa: N806
    # if it is a surrogate,
    # load EDP.json from the standard surrogate models and write it to EDP
@@ -77,12 +77,12 @@ def write_RV(AIM_input_path, EDP_input_path, EDP_type):
"responses": EDP_list
},]
}
- """
- with open(EDP_input_path, 'w', encoding='utf-8') as f:
+ """ # noqa: W291
+ with open(EDP_input_path, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(root_EDP, f, indent=2)
-def create_EDP(AIM_input_path, EDP_input_path, EDP_type):
+def create_EDP(AIM_input_path, EDP_input_path, EDP_type): # noqa: ARG001, N802, N803, D103
pass
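
`PTH118` and `PTH123`, suppressed throughout this patch, mark `os.path.join` and builtin `open` calls that Ruff's flake8-use-pathlib rules would migrate. A behavior-preserving sketch of the surrogate-path read above in pathlib form:

```python
# Pathlib equivalent of the os.path.join + open pattern (sketch only).
import json
from pathlib import Path


def load_surrogate(ms_path, main_script):
    surrogate_path = Path(ms_path) / main_script  # replaces os.path.join (PTH118)
    with surrogate_path.open(encoding='utf-8') as f:  # replaces open() (PTH123)
        return json.load(f)
```
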
diff --git a/modules/createEDP/userEDP_R/UserDefinedEDP.py b/modules/createEDP/userEDP_R/UserDefinedEDP.py
index c0bf9a3d5..39fc50cb2 100644
--- a/modules/createEDP/userEDP_R/UserDefinedEDP.py
+++ b/modules/createEDP/userEDP_R/UserDefinedEDP.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -42,29 +42,29 @@
import sys
-def write_RV(AIM_file, EVENT_file, EDP_file, EDP_specs):
+def write_RV(AIM_file, EVENT_file, EDP_file, EDP_specs): # noqa: N802, N803, D103
# We do this to provide an option for different behavior under setup,
# even though it is unlikely to have random variables for EDPs.
write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs)
-def write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs):
- with open(AIM_file) as f:
+def write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs): # noqa: N802, N803, D103
+ with open(AIM_file) as f: # noqa: PTH123
bim_file = json.load(f)
- with open(EVENT_file) as f:
- event_file = json.load(f)
+ with open(EVENT_file) as f: # noqa: PTH123
+ event_file = json.load(f) # noqa: F841
stories = bim_file['GeneralInformation']['NumberOfStories']
- with open(EDP_specs) as f:
+ with open(EDP_specs) as f: # noqa: PTH123
edp_specs = json.load(f)
- EDP_locs = edp_specs['locations']
- EDP_types = edp_specs['EDP_types']
+ EDP_locs = edp_specs['locations'] # noqa: N806
+ EDP_types = edp_specs['EDP_types'] # noqa: N806
- EDP_list = []
- total_EDP_num = 0
+ EDP_list = [] # noqa: N806
+ total_EDP_num = 0 # noqa: N806
for edp_name, edp_data in EDP_types.items():
for loc_id, loc_data in edp_data.items():
@@ -85,7 +85,7 @@ def write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs):
'scalar_data': [],
}
)
- total_EDP_num += len(loc_data)
+ total_EDP_num += len(loc_data) # noqa: N806
else:
EDP_list.append(
{
@@ -98,7 +98,7 @@ def write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs):
'scalar_data': [],
}
)
- total_EDP_num += len(loc_data)
+ total_EDP_num += len(loc_data) # noqa: N806
edp_file = {
'RandomVariables': [],
@@ -106,7 +106,7 @@ def write_EDP(AIM_file, EVENT_file, EDP_file, EDP_specs):
'EngineeringDemandParameters': [{'name': '...', 'responses': EDP_list}],
}
- with open(EDP_file, 'w') as f:
+ with open(EDP_file, 'w') as f: # noqa: PTH123
json.dump(edp_file, f, indent=2)
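
For orientation, `write_EDP` consumes `EDP_specs` as a locations table plus a two-level mapping (EDP type -> location id -> per-location response list), and `total_EDP_num` accumulates `len(loc_data)` over every pair. An illustrative input inferred from those reads (field values are assumptions):

```python
# EDP_specs shape consistent with the reads in write_EDP (values assumed).
edp_specs = {
    'locations': {'1': {'x': 0.0, 'y': 0.0, 'z': 3.0}},
    'EDP_types': {
        'PID': {'1': ['drift_x', 'drift_y']},  # loc_id -> list of responses
        'PFA': {'1': ['accel_x']},
    },
}
total_edp_num = sum(
    len(loc_data)
    for edp_data in edp_specs['EDP_types'].values()
    for loc_data in edp_data.values()
)
print(total_edp_num)  # 3
```
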
diff --git a/modules/createEVENT/ASCE7_WindSpeed/ASCE7_WindSpeed.py b/modules/createEVENT/ASCE7_WindSpeed/ASCE7_WindSpeed.py
index 169f32b37..0dc3e8818 100644
--- a/modules/createEVENT/ASCE7_WindSpeed/ASCE7_WindSpeed.py
+++ b/modules/createEVENT/ASCE7_WindSpeed/ASCE7_WindSpeed.py
@@ -1,9 +1,9 @@
-import os
+import os # noqa: INP001, D100
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -11,31 +11,31 @@
import json
-def write_RV(BIM_input_path, EVENT_input_path):
+def write_RV(BIM_input_path, EVENT_input_path): # noqa: ARG001, N802, N803, D103
# create the empty EVENT.json file
- EVENT_in = {'Events': []}
+ EVENT_in = {'Events': []} # noqa: N806
- with open(EVENT_input_path, 'w') as f:
+ with open(EVENT_input_path, 'w') as f: # noqa: PTH123
json.dump(EVENT_in, f, indent=2)
- # TODO: if there are multiple events, we need to create a random variable for them
+ # TODO: if there are multiple events, we need to create a random variable for them # noqa: TD002
-def get_windspeed(BIM_input_path, EVENT_input_path, wind_database_path, severity):
- sys.path.insert(0, os.getcwd())
+def get_windspeed(BIM_input_path, EVENT_input_path, wind_database_path, severity): # noqa: N803, D103
+ sys.path.insert(0, os.getcwd()) # noqa: PTH109
# load the BIM file
- with open(BIM_input_path) as f:
- BIM_in = json.load(f)
+ with open(BIM_input_path) as f: # noqa: PTH123
+ BIM_in = json.load(f) # noqa: N806
# load the EVENT file
- with open(EVENT_input_path) as f:
- EVENT_in = json.load(f)
+ with open(EVENT_input_path) as f: # noqa: PTH123
+ EVENT_in = json.load(f) # noqa: N806
# if there is a wind database path provided
if wind_database_path is not None:
# then we need to load the wind data from there
- with open(wind_database_path) as f:
+ with open(wind_database_path) as f: # noqa: PTH123
wind_db = json.load(f)
# the location id is stored in the BIM file
@@ -54,7 +54,7 @@ def get_windspeed(BIM_input_path, EVENT_input_path, wind_database_path, severity
event['Events'][0]['type'] == 'ASCE7_WindSpeed'
):
event_info = event['Events'][0]
- with open(event_info['fileName']) as f:
+ with open(event_info['fileName']) as f: # noqa: PTH123
wind_speed_in = json.load(f)
event_id = wind_speed_in['id']
@@ -76,7 +76,7 @@ def get_windspeed(BIM_input_path, EVENT_input_path, wind_database_path, severity
}
EVENT_in['Events'].append(event_json)
- with open(EVENT_input_path, 'w') as f:
+ with open(EVENT_input_path, 'w') as f: # noqa: PTH123
json.dump(EVENT_in, f, indent=2)
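
The createEVENT modules share a two-phase handshake: under `-getRV`, `write_RV` writes a stub EVENT.json, and the second pass reopens it and appends the populated event. A minimal sketch of that protocol (the event payload itself is illustrative):

```python
# Two-phase EVENT.json handshake (sketch; payload fields are illustrative).
import json


def write_rv_stub(event_path):
    # Phase 1 (-getRV): write an empty Events container.
    with open(event_path, 'w') as f:
        json.dump({'Events': []}, f, indent=2)


def append_event(event_path, event_json):
    # Phase 2: reload the stub and append the populated event.
    with open(event_path) as f:
        event_in = json.load(f)
    event_in['Events'].append(event_json)
    with open(event_path, 'w') as f:
        json.dump(event_in, f, indent=2)
```
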
diff --git a/modules/createEVENT/CFDEvent/CFDEvent.py b/modules/createEVENT/CFDEvent/CFDEvent.py
index b4207d89d..40062bcf0 100644
--- a/modules/createEVENT/CFDEvent/CFDEvent.py
+++ b/modules/createEVENT/CFDEvent/CFDEvent.py
@@ -1,25 +1,25 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -31,10 +31,10 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- patternsArray = []
- windEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ patternsArray = [] # noqa: N806
+ windEventJson = { # noqa: N806
'type': 'Wind',
'subtype': 'OpenFOAM CFD Expert Event',
'pattern': patternsArray,
@@ -44,20 +44,20 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [windEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [windEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
- with open(eventFilePath, 'w', encoding='utf-8') as eventsFile:
+ with open(eventFilePath, 'w', encoding='utf-8') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath) as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath) as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -78,11 +78,11 @@ def GetFloorsCount(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# write the event file
writeEVENT(forces, arguments.filenameEVENT)
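
`B007` (unused loop variable) and `PERF401` both point at the forces-building loop above; the suppressions keep the original shape. The rewrite both rules suggest is a comprehension with a throwaway name (sketch):

```python
# Comprehension form of the forces build (sketch, same behavior).
class FloorForces:  # matches the class above: one zero sample per axis
    def __init__(self):
        self.X, self.Y, self.Z = [0], [0], [0]


floorsCount = 3  # example value; normally GetFloorsCount(arguments.filenameAIM)
forces = [FloorForces() for _ in range(floorsCount)]  # '_' avoids B007
```
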
diff --git a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
index 46f410485..ed0c853ea 100644
--- a/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
+++ b/modules/createEVENT/EmptyDomainCFD/EmptyDomainCFD.py
@@ -1,25 +1,25 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -31,10 +31,10 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- patternsArray = []
- windEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ patternsArray = [] # noqa: N806
+ windEventJson = { # noqa: N806
'type': 'Wind',
'subtype': 'IsolatedBuildingCFD',
'pattern': patternsArray,
@@ -44,20 +44,20 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [windEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [windEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
- with open(eventFilePath, 'w') as eventsFile:
+ with open(eventFilePath, 'w') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath) as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath) as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -78,11 +78,11 @@ def GetFloorsCount(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# write the event file
writeEVENT(forces, arguments.filenameEVENT)
diff --git a/modules/createEVENT/EmptyDomainCFD/foam_file_processor.py b/modules/createEVENT/EmptyDomainCFD/foam_file_processor.py
index 946fc6916..58399274d 100644
--- a/modules/createEVENT/EmptyDomainCFD/foam_file_processor.py
+++ b/modules/createEVENT/EmptyDomainCFD/foam_file_processor.py
@@ -1,4 +1,4 @@
-# This script contains functions for reading and writing
+# This script contains functions for reading and writing # noqa: INP001, D100
# OpenFOAM dictionaries and files.
#
import os
@@ -6,18 +6,18 @@
import numpy as np
-def find_keyword_line(dict_lines, keyword):
+def find_keyword_line(dict_lines, keyword): # noqa: D103
start_line = -1
count = 0
for line in dict_lines:
- l = line.lstrip(' ')
+ l = line.lstrip(' ') # noqa: E741
if l.startswith(keyword):
start_line = count
break
- count += 1
+ count += 1 # noqa: SIM113
return start_line
@@ -29,11 +29,11 @@ def write_foam_field(field, file_name):
vectorField,
tensorField,
symmTensorField
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D400, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
@@ -54,11 +54,11 @@ def write_foam_field(field, file_name):
def write_scalar_field(field, file_name):
"""Writes a given one dimensional numpy array to OpenFOAM
scalar field format.
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
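
Two of the suppressed rules in this file have tidy rewrites worth recording: `SIM113` wants `enumerate` instead of the manual counter in `find_keyword_line`, and `SIM115`/`PTH123` want the bare `open(..., 'w+')` handles wrapped in a context manager. A behavior-preserving sketch of both:

```python
# SIM113: enumerate replaces the manual counter (sketch, same result).
def find_keyword_line(dict_lines, keyword):
    for count, line in enumerate(dict_lines):
        if line.lstrip(' ').startswith(keyword):
            return count
    return -1  # keyword not found, as in the original


# SIM115: a with-block guarantees the file is closed even on error.
def write_lines(file_name, lines):
    with open(file_name, 'w+') as foam_file:
        foam_file.writelines(lines)
```
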
diff --git a/modules/createEVENT/EmptyDomainCFD/post_process_output.py b/modules/createEVENT/EmptyDomainCFD/post_process_output.py
index 7ec4f39ac..4021a5915 100644
--- a/modules/createEVENT/EmptyDomainCFD/post_process_output.py
+++ b/modules/createEVENT/EmptyDomainCFD/post_process_output.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017, The Regents of the University of California (Regents).
+# Copyright (c) 2016-2017, The Regents of the University of California (Regents). # noqa: INP001, D100
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -54,30 +54,30 @@
from scipy import signal
-def readPressureProbes(fileName):
+def readPressureProbes(fileName): # noqa: N802, N803
"""Created on Wed May 16 14:31:42 2018
    Reads pressure probe data from OpenFOAM and returns the probe location, time, and the pressure
for each time step.
@author: Abiy
- """
+ """ # noqa: D400, D401
probes = []
p = []
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.split()
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
p_probe_i = np.zeros([len(probes)])
for i in range(len(probes)):
@@ -106,7 +106,7 @@ def read_pressure_data(file_names):
time, pressure
    Returns the time and pressure data of the connected files.
- """
+ """ # noqa: D205, D401, D404
no_files = len(file_names)
connected_time = [] # Connected array of time
connected_p = [] # connected array of pressure.
@@ -128,7 +128,7 @@ def read_pressure_data(file_names):
index = np.where(time2 > time1[-1])[0][0]
# index += 1
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
index = 0 # Joint them even if they have a time gap
@@ -136,7 +136,7 @@ def read_pressure_data(file_names):
connected_p = np.concatenate((connected_p, p2[index:]))
time1 = time2
- p1 = p2
+ p1 = p2 # noqa: F841
return probes, connected_time, connected_p
@@ -144,7 +144,7 @@ class PressureData:
"""A class that holds a pressure data and performs the following operations:
- mean and rms pressure coefficients
- peak pressure coefficients
- """
+ """ # noqa: D205, D400
def __init__(
self,
@@ -172,8 +172,8 @@ def __init__(
self.probe_count = np.shape(self.probes)[0]
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
# print(sorted_index)
@@ -181,7 +181,7 @@ def __read_cfd_data(self):
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p') # noqa: PTH118
file_names.append(file_name)
# print(file_names)
@@ -190,30 +190,30 @@ def __read_cfd_data(self):
# self.p = np.transpose(self.p) # OpenFOAM gives p/rho
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
# self.cp = self.cp[:,start_index:]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, start_index:]
- except:
+ except: # noqa: S110, E722
pass
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
# self.cp = self.cp[:,:end_index]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, :end_index]
- except:
+ except: # noqa: S110, E722
pass
-def von_karman_spectrum(f, Uav, I, L, comp=0):
- psd = np.zeros(len(f))
+def von_karman_spectrum(f, Uav, I, L, comp=0): # noqa: N803, E741, D103
+ psd = np.zeros(len(f)) # noqa: F841
if comp == 0:
return (
@@ -223,7 +223,7 @@ def von_karman_spectrum(f, Uav, I, L, comp=0):
/ np.power(1.0 + 70.8 * np.power(f * L / Uav, 2.0), 5.0 / 6.0)
)
- if comp == 1 or comp == 2:
+ if comp == 1 or comp == 2: # noqa: RET503, PLR1714, PLR2004
return (
4.0
* np.power(I * Uav, 2.0)
@@ -251,7 +251,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -264,8 +264,8 @@ def write_open_foam_vector_field(p, file_name):
"""Writes a given vector-field (n x 3) array to OpenFOAM 'vectorField'
format.
- """
- f = open(file_name, 'w+')
+ """ # noqa: D205, D401
+ f = open(file_name, 'w+') # noqa: SIM115, PTH123
f.write('%d' % len(p[:, 2]))
f.write('\n(')
for i in range(len(p[:, 2])):
@@ -275,58 +275,58 @@ def write_open_foam_vector_field(p, file_name):
f.close()
-def read_openFoam_scalar_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- sField = []
+def read_openFoam_scalar_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ sField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
itrf = iter(f)
next(itrf)
for line in itrf:
- if line.startswith('(') or line.startswith(')'):
+ if line.startswith('(') or line.startswith(')'): # noqa: PIE810
continue
- else:
- line = line.split()
+ else: # noqa: RET507
+ line = line.split() # noqa: PLW2901
sField.append(float(line[0]))
- sField = np.asarray(sField, dtype=np.float32)
+ sField = np.asarray(sField, dtype=np.float32) # noqa: N806
- return sField
+ return sField # noqa: RET504
-def read_openFoam_vector_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_vector_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
- if len(line) < 3:
+ if len(line) < 3: # noqa: PLR2004
continue
vField.append([float(line[0]), float(line[1]), float(line[2])])
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 9
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -338,23 +338,23 @@ def read_openFoam_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_symmetric_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 6
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -365,9 +365,9 @@ def read_openFoam_symmetric_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
def read_velocity_data(path):
@@ -385,35 +385,35 @@ def read_velocity_data(path):
    time, velocity
    Returns the time and velocity data of the connected files.
- """
+ """ # noqa: D205, D401, D404
num_files = len(path)
connected_time = [] # Connected array of time
- connected_U = [] # connected array of pressure.
+ connected_U = [] # connected array of velocity. # noqa: N806
time1 = []
- U1 = []
+ U1 = [] # noqa: N806
time2 = []
- U2 = []
+ U2 = [] # noqa: N806
probes = []
for i in range(num_files):
- probes, time2, U2 = read_velocity_probes(path[i])
+ probes, time2, U2 = read_velocity_probes(path[i]) # noqa: N806
if i != 0:
try:
index = np.where(time2 > time1[-1])[0][0]
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
index = 0 # Join them even if they have a time gap
connected_time = np.concatenate((connected_time, time2[index:]))
- connected_U = np.concatenate((connected_U, U2[index:]))
+ connected_U = np.concatenate((connected_U, U2[index:])) # noqa: N806
else:
connected_time = time2
- connected_U = U2
+ connected_U = U2 # noqa: N806
time1 = time2
- U1 = U2
+ U1 = U2 # noqa: N806, F841
shape = np.shape(connected_U)
- U = np.zeros((shape[1], shape[2], shape[0]))
+ U = np.zeros((shape[1], shape[2], shape[0])) # noqa: N806
for i in range(shape[1]):
for j in range(shape[2]):
@@ -421,33 +421,33 @@ def read_velocity_data(path):
return probes, connected_time, U
-def read_velocity_probes(fileName):
+def read_velocity_probes(fileName): # noqa: N803
"""Created on Wed May 16 14:31:42 2018
    Reads velocity probe data from OpenFOAM and returns the probe location, time,
and the velocity vector for each time step.
- """
+ """ # noqa: D400, D401
probes = []
- U = []
+ U = [] # noqa: N806
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
try:
time.append(float(line[0]))
- except:
+ except: # noqa: S112, E722
continue
u_probe_i = np.zeros([len(probes), 3])
for i in range(len(probes)):
@@ -460,13 +460,13 @@ def read_velocity_probes(fileName):
probes = np.asarray(probes, dtype=np.float32)
time = np.asarray(time, dtype=np.float32)
- U = np.asarray(U, dtype=np.float32)
+ U = np.asarray(U, dtype=np.float32) # noqa: N806
return probes, time, U
def calculate_length_scale(u, uav, dt, min_corr=0.0):
- """Calculates the length scale of a velocity time history given."""
+ """Calculates the length scale of a velocity time history given.""" # noqa: D401
u = u - np.mean(u)
corr = signal.correlate(u, u, mode='full')
@@ -479,12 +479,12 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):
corr = corr[:loc]
- L = uav * np.trapz(corr, dx=dt)
+ L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806
- return L
+ return L # noqa: RET504
-def psd(x, dt, nseg):
+def psd(x, dt, nseg): # noqa: F811
"""Calculates the power spectral density of a given signal using the welch
method.
@@ -502,7 +502,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -516,13 +516,13 @@ class VelocityData:
- mean velocity profile
- turbulence intensity profiles
- integral scale of turbulence profiles
- """
+ """ # noqa: D205, D400
def __init__(
self,
path,
sampling_rate=400,
- filter_data=False,
+ filter_data=False, # noqa: FBT002
filter_freq=400,
start_time=None,
end_time=None,
@@ -551,14 +551,14 @@ def __init__(
self.__calculate_all()
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U') # noqa: PTH118
file_names.append(file_name)
self.probes, self.time, self.U = read_velocity_data(file_names)
@@ -576,14 +576,14 @@ def __read_cfd_data(self):
# Coefficient of variation
cv = np.std(np.diff(self.time)) / np.mean(np.diff(self.time))
- if cv > 1.0e-4:
+ if cv > 1.0e-4: # noqa: PLR2004
self.__adjust_time_step()
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __adjust_time_step(self):
- if self.resample_dt == None:
+ if self.resample_dt == None: # noqa: E711
dt = np.mean(np.diff(self.time))
else:
dt = self.resample_dt
@@ -592,7 +592,7 @@ def __adjust_time_step(self):
shape = np.shape(self.U)
- U = np.zeros((shape[0], shape[1], len(time)))
+ U = np.zeros((shape[0], shape[1], len(time))) # noqa: N806
for i in range(shape[0]):
for j in range(shape[1]):
@@ -611,12 +611,12 @@ def __filter_signal(self):
self.U[i, j, :] = signal.sosfilt(low_pass, self.U[i, j, :])
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
self.U = self.U[:, :, start_index:]
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
self.U = self.U[:, :, :end_index]
@@ -654,7 +654,7 @@ def __calculate_all(self):
self.uv_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 1, :])[0, 1]
self.uw_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 2, :])[0, 1]
- def get_Uav(self, z):
+ def get_Uav(self, z): # noqa: N802, D102
from scipy import interpolate
f = interpolate.interp1d(self.z, self.Uav)
@@ -670,48 +670,48 @@ def copy_vtk_planes_and_order(input_path, output_path, field):
input_path: path of the vtk files in the postProcessing directory
    output_path: path to write the vtk files in order
- """
- if not os.path.isdir(input_path):
- print(f'Cannot find the path for: {input_path}')
+ """ # noqa: D205, D401, D404
+ if not os.path.isdir(input_path): # noqa: PTH112
+ print(f'Cannot find the path for: {input_path}') # noqa: T201
return
- if not os.path.isdir(output_path):
- print(f'Cannot find the path for: {output_path}')
+ if not os.path.isdir(output_path): # noqa: PTH112
+ print(f'Cannot find the path for: {output_path}') # noqa: T201
return
- print(f'Reading from path: {input_path}')
+ print(f'Reading from path: {input_path}') # noqa: T201
time_names = os.listdir(input_path)
times = np.float64(time_names)
sorted_index = np.argsort(times).tolist()
n_times = len(times)
- print(f'\tNumber of time directories: {n_times} ')
- print(f'\tTime step: {np.mean(np.diff(times)):.4f} s')
- print(
+ print(f'\tNumber of time directories: {n_times} ') # noqa: T201
+ print(f'\tTime step: {np.mean(np.diff(times)):.4f} s') # noqa: T201
+ print( # noqa: T201
f'\tTotal duration: {times[sorted_index[-1]] - times[sorted_index[0]]:.4f} s'
)
for i in range(n_times):
index = sorted_index[i]
- pathi = os.path.join(input_path, time_names[index])
+ pathi = os.path.join(input_path, time_names[index]) # noqa: PTH118
os.listdir(pathi)
new_name = f'{field}_T{i + 1:04d}.vtk'
for f in os.listdir(pathi):
if f.endswith('.vtk'):
- new_path = os.path.join(output_path, new_name)
- old_path = os.path.join(pathi, f)
+ new_path = os.path.join(output_path, new_name) # noqa: PTH118
+ old_path = os.path.join(pathi, f) # noqa: PTH118
shutil.copyfile(old_path, new_path)
- print(f'Copied path: {old_path}')
+ print(f'Copied path: {old_path}') # noqa: T201
-def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
+def plot_wind_profiles_and_spectra(case_path, output_path, prof_name): # noqa: D103
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -719,7 +719,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
ref_h = wc_data['referenceHeight']
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = VelocityData(prof_path, start_time=None, end_time=None)
@@ -736,26 +736,26 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
prof_np[:, 8] = prof.L[:, 2]
# Read the target wind profile data
- tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet')
+ tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet') # noqa: PTH118
- tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points'))
- tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U'))
- tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R'))
- tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L'))
+ tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points')) # noqa: PTH118
+ tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U')) # noqa: PTH118, N806
+ tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R')) # noqa: PTH118, N806
+ tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L')) # noqa: PTH118, N806
- tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U)
+ tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U) # noqa: N806, F841
- tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U
- tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U
- tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U
+ tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U # noqa: N806
+ tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U # noqa: N806
+ tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U # noqa: N806
tar_uw = tar_R[:, 2]
- tar_Lu = tar_L[:, 0]
- tar_Lv = tar_L[:, 3]
- tar_Lw = tar_L[:, 6]
+ tar_Lu = tar_L[:, 0] # noqa: N806
+ tar_Lv = tar_L[:, 3] # noqa: N806
+ tar_Lw = tar_L[:, 6] # noqa: N806
- tar_I = np.zeros((3, len(tar_Iu)))
- tar_L = np.zeros((3, len(tar_Lu)))
+ tar_I = np.zeros((3, len(tar_Iu))) # noqa: N806
+ tar_L = np.zeros((3, len(tar_Lu))) # noqa: N806
tar_I[0, :] = tar_Iu
tar_I[1, :] = tar_Iv
@@ -788,7 +788,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_U,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -799,7 +799,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 1],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -833,7 +833,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -844,7 +844,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 2],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -877,7 +877,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -888,7 +888,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 3],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -921,7 +921,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -932,7 +932,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 4],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -965,7 +965,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_uw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -976,7 +976,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 5],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1009,7 +1009,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1020,7 +1020,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 6],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1053,7 +1053,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lv,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1064,7 +1064,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 7],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1097,7 +1097,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1108,7 +1108,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 8],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1139,7 +1139,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=850, width=1200, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, prof_name + '.html'),
+ os.path.join(output_path, prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1170,8 +1170,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
vertical_spacing=0.15,
)
- U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1])
- U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U)
+ U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1]) # noqa: N806
+ U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U) # noqa: N806
# Plot each component
for j in range(ncomp):
@@ -1185,8 +1185,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
spec = freq * spec / u_var
freq = freq * spec_h[i] / U_ref_prof
- tar_Iz = tar_I[j, loc_tar]
- tar_Lz = tar_L[j, loc_tar]
+ tar_Iz = tar_I[j, loc_tar] # noqa: N806
+ tar_Lz = tar_L[j, loc_tar] # noqa: N806
vonk_f = np.logspace(np.log10(f_min), np.log10(f_max), 200)
vonk_psd = von_karman_spectrum(vonk_f, U_ref_tar, tar_Iz, tar_Lz, j)
@@ -1198,7 +1198,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=freq,
y=spec,
- line=dict(color='firebrick', width=1.5),
+ line=dict(color='firebrick', width=1.5), # noqa: C408
mode='lines',
name=prof_name,
),
@@ -1209,7 +1209,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=vonk_f,
y=vonk_psd,
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target(von Karman)',
),
@@ -1240,15 +1240,15 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=450, width=1500, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(
+ os.path.join( # noqa: PTH118
output_path, 'spectra_' + prof_name + '_H' + str(1 + i) + '.html'
),
include_mathjax='cdn',
)
-def plot_pressure_profile(case_path, output_path, prof_name):
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+def plot_pressure_profile(case_path, output_path, prof_name): # noqa: D103
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = PressureData(
prof_path, start_time=1.0, end_time=None, u_ref=0.0, rho=1.25, p_ref=0.0
@@ -1271,7 +1271,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
go.Scatter(
x=prof.x - np.min(prof.x),
y=std_p,
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1303,7 +1303,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
fig.update_layout(height=400, width=800, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, 'pressure_' + prof_name + '.html'),
+ os.path.join(output_path, 'pressure_' + prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1325,15 +1325,15 @@ def plot_pressure_profile(case_path, output_path, prof_name):
case_path = arguments.case
- print('Case full path: ', case_path)
+ print('Case full path: ', case_path) # noqa: T201
# prof_name = sys.argv[2]
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1342,12 +1342,12 @@ def plot_pressure_profile(case_path, output_path, prof_name):
wind_profiles = rm_data['windProfiles']
vtk_planes = rm_data['vtkPlanes']
- prof_output_path = os.path.join(
+ prof_output_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'output', 'windProfiles'
)
# Check if it exists and remove files
- if os.path.exists(prof_output_path):
+ if os.path.exists(prof_output_path): # noqa: PTH110
shutil.rmtree(prof_output_path)
# Create new path
@@ -1357,8 +1357,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
for prof in wind_profiles:
name = prof['name']
field = prof['field']
- print(name)
- print(field)
+ print(name) # noqa: T201
+ print(field) # noqa: T201
if field == 'Velocity':
plot_wind_profiles_and_spectra(case_path, prof_output_path, name)
@@ -1371,8 +1371,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
name = pln['name']
field = pln['field']
- vtk_path = os.path.join(case_path, 'postProcessing', name)
- vtk_path_renamed = os.path.join(
+ vtk_path = os.path.join(case_path, 'postProcessing', name) # noqa: PTH118
+ vtk_path_renamed = os.path.join( # noqa: PTH118
case_path, 'postProcessing', name + '_renamed'
)
@@ -1381,5 +1381,5 @@ def plot_pressure_profile(case_path, output_path, prof_name):
copy_vtk_planes_and_order(vtk_path, vtk_path_renamed, field)
# Check if it exists and remove files
- if os.path.exists(vtk_path):
+ if os.path.exists(vtk_path): # noqa: PTH110
shutil.rmtree(vtk_path)
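The C408 suppressions in this file mark keyword-style dict() calls inside the
plotly traces; ruff's suggested alternative is a dict literal. A minimal sketch
of an equivalent trace in the literal style (the x/y values are illustrative
placeholders, not data from the case files):

import plotly.graph_objects as go

# Same styling as line=dict(color='firebrick', width=1.5), written as a literal:
fig = go.Figure(
    go.Scatter(
        x=[0.0, 1.0, 2.0],  # placeholder heights
        y=[5.0, 6.5, 7.2],  # placeholder mean velocities
        line={'color': 'firebrick', 'width': 1.5},
        mode='lines',
        name='profile',
    )
)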
diff --git a/modules/createEVENT/EmptyDomainCFD/setup_case.py b/modules/createEVENT/EmptyDomainCFD/setup_case.py
index d9ce05c71..a008f6618 100644
--- a/modules/createEVENT/EmptyDomainCFD/setup_case.py
+++ b/modules/createEVENT/EmptyDomainCFD/setup_case.py
@@ -1,7 +1,7 @@
"""This script writes BC and initial condition, and setups the OpenFoam case
directory.
-"""
+""" # noqa: INP001, D205, D404
import json
import os
@@ -11,9 +11,9 @@
import numpy as np
-def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_block_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -22,12 +22,12 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
origin = np.array(geom_data['origin'])
- scale = geom_data['geometricScale']
+ scale = geom_data['geometricScale'] # noqa: F841
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_cells = mesh_data['xNumCells']
y_cells = mesh_data['yNumCells']
@@ -69,7 +69,7 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
z_max = z_min + Lz
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/blockMeshDictTemplate')
+ dict_file = open(template_dict_path + '/blockMeshDictTemplate') # noqa: SIM115, PTH123
    # Read the template file contents
dict_lines = dict_file.readlines()
@@ -114,18 +114,18 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
write_file_name = case_path + '/system/blockMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -133,16 +133,16 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
geom_data = json_data['GeometricData']
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
origin = np.array(geom_data['origin'])
num_cells_between_levels = mesh_data['numCellsBetweenLevels']
resolve_feature_angle = mesh_data['resolveFeatureAngle']
- num_processors = mesh_data['numProcessors']
+ num_processors = mesh_data['numProcessors'] # noqa: F841
refinement_boxes = mesh_data['refinementBoxes']
@@ -150,14 +150,14 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
y_min = -Ly / 2.0 - origin[1]
z_min = 0.0 - origin[2]
- x_max = x_min + Lx
+ x_max = x_min + Lx # noqa: F841
y_max = y_min + Ly
z_max = z_min + Lz
inside_point = [x_min + Lf / 2.0, (y_min + y_max) / 2.0, (z_min + z_max) / 2.0]
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate')
+ dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate') # noqa: SIM115, PTH123
    # Read the template file contents
dict_lines = dict_file.readlines()
@@ -228,10 +228,10 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/snappyHexMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -240,9 +240,9 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
def write_boundary_data_files(input_json_path, case_path):
"""This functions writes wind profile files in "constant/boundaryData/inlet"
if TInf options are used for the simulation.
- """
+ """ # noqa: D205, D401, D404
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -261,8 +261,8 @@ def write_boundary_data_files(input_json_path, case_path):
origin = np.array(geom_data['origin'])
- Ly = geom_data['domainWidth']
- Lf = geom_data['fetchLength']
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_min = -Lf - origin[0]
y_min = -Ly / 2.0 - origin[1]
@@ -288,25 +288,25 @@ def write_boundary_data_files(input_json_path, case_path):
foam.write_foam_field(wind_profiles[:, 8:17], bd_path + 'L')
-def write_U_file(input_json_path, template_dict_path, case_path):
+def write_U_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- inlet_BC_type = boundary_data['inletBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
+ inlet_BC_type = boundary_data['inletBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/UFileTemplate')
+ dict_file = open(template_dict_path + '/UFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -399,28 +399,28 @@ def write_U_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/U'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_p_file(input_json_path, template_dict_path, case_path):
+def write_p_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/pFileTemplate')
+ dict_file = open(template_dict_path + '/pFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -484,34 +484,34 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/p'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_nut_file(input_json_path, template_dict_path, case_path):
+def write_nut_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
# wind_speed = wind_data['roofHeightWindSpeed']
# building_height = wind_data['buildingHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/nutFileTemplate')
+ dict_file = open(template_dict_path + '/nutFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -589,34 +589,34 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/nut'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_epsilon_file(input_json_path, template_dict_path, case_path):
+def write_epsilon_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/epsilonFileTemplate')
+ dict_file = open(template_dict_path + '/epsilonFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -707,34 +707,34 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/epsilon'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_k_file(input_json_path, template_dict_path, case_path):
+def write_k_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/kFileTemplate')
+ dict_file = open(template_dict_path + '/kFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -742,7 +742,7 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# BC and initial condition (you may need to scale to model scale)
# k0 = 1.3 #not in model scale
- I = 0.1
+ I = 0.1 # noqa: N806, E741
k0 = 1.5 * (I * wind_speed) ** 2
# Internal Field #########################
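The E741 and N806 suppressions just above keep the single-letter turbulence
intensity I, and k0 = 1.5 * (I * wind_speed) ** 2 is the standard isotropic
estimate k = 3/2 (I*U)^2, i.e. equal velocity variance assumed in all three
components. A quick numeric check with an illustrative reference speed:

# Turbulence kinetic energy from intensity and mean wind speed,
# assuming isotropic fluctuations: k = 1.5 * (I * U) ** 2
intensity = 0.1    # value hard-coded in write_k_file
wind_speed = 10.0  # illustrative reference speed, m/s
k0 = 1.5 * (intensity * wind_speed) ** 2
print(k0)          # 1.5 (m^2/s^2)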
@@ -821,18 +821,18 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/k'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_controlDict_file(input_json_path, template_dict_path, case_path):
+def write_controlDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -860,7 +860,7 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
purge_write = 3
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/controlDictTemplate')
+ dict_file = open(template_dict_path + '/controlDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -931,18 +931,18 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/controlDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSolution_file(input_json_path, template_dict_path, case_path):
+def write_fvSolution_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -955,7 +955,7 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
num_outer_correctors = ns_data['numOuterCorrectors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/fvSolutionTemplate')
+ dict_file = open(template_dict_path + '/fvSolutionTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -990,18 +990,18 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSolution'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
+def write_pressure_probes_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1011,7 +1011,7 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1036,18 +1036,18 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/pressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
+def write_wind_profiles_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1061,7 +1061,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
write_interval = rm_data['profileWriteInterval']
start_time = rm_data['profileStartTime']
- if rm_data['monitorWindProfile'] == False:
+ if rm_data['monitorWindProfile'] == False: # noqa: E712
return
if len(wind_profiles) == 0:
@@ -1070,7 +1070,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for prof in wind_profiles:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1137,18 +1137,18 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
+def write_vtk_plane_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1160,7 +1160,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
vtk_planes = rm_data['vtkPlanes']
write_interval = rm_data['vtkWriteInterval']
- if rm_data['monitorVTKPlane'] == False:
+ if rm_data['monitorVTKPlane'] == False: # noqa: E712
return
if len(vtk_planes) == 0:
@@ -1169,7 +1169,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for pln in vtk_planes:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/vtkPlaneTemplate')
+ dict_file = open(template_dict_path + '/vtkPlaneTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1238,30 +1238,30 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_momentumTransport_file(input_json_path, template_dict_path, case_path):
+def write_momentumTransport_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
turb_data = json_data['turbulenceModeling']
simulation_type = turb_data['simulationType']
- RANS_type = turb_data['RANSModelType']
- LES_type = turb_data['LESModelType']
- DES_type = turb_data['DESModelType']
+ RANS_type = turb_data['RANSModelType'] # noqa: N806
+ LES_type = turb_data['LESModelType'] # noqa: N806
+ DES_type = turb_data['DESModelType'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/momentumTransportTemplate')
+ dict_file = open(template_dict_path + '/momentumTransportTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1293,18 +1293,18 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
# Write edited dict to file
write_file_name = case_path + '/constant/momentumTransport'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_physicalProperties_file(input_json_path, template_dict_path, case_path):
+def write_physicalProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1313,7 +1313,7 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/physicalPropertiesTemplate')
+ dict_file = open(template_dict_path + '/physicalPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1325,18 +1325,18 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
# Write edited dict to file
write_file_name = case_path + '/constant/physicalProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_transportProperties_file(input_json_path, template_dict_path, case_path):
+def write_transportProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1345,7 +1345,7 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/transportPropertiesTemplate')
+ dict_file = open(template_dict_path + '/transportPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1357,18 +1357,18 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/constant/transportProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
+def write_fvSchemes_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1377,7 +1377,7 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
simulation_type = turb_data['simulationType']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}')
+ dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1385,18 +1385,18 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSchemes'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
+def write_decomposeParDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1405,7 +1405,7 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
num_processors = ns_data['numProcessors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/decomposeParDictTemplate')
+ dict_file = open(template_dict_path + '/decomposeParDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1425,18 +1425,18 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/decomposeParDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
+def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
fmax = 200.0
@@ -1452,7 +1452,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
duration = duration * 1.010
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/DFSRTurbDictTemplate')
+ dict_file = open(template_dict_path + '/DFSRTurbDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1480,10 +1480,10 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/constant/DFSRTurbDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -1508,7 +1508,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# set up goes here
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
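Every writer in setup_case.py above repeats the sequence that the SIM115,
PTH110, PTH107 and PTH123 suppressions cover: open a template, read its lines,
delete any existing output file, then write the edited lines through a bare
open(). A hedged sketch of a pathlib helper that would make those suppressions
unnecessary; render_openfoam_dict and its signature are hypothetical, not part
of the module:

from pathlib import Path

def render_openfoam_dict(template_path, output_path, edit_lines):
    """Read an OpenFOAM template, let the caller edit it, write the result."""
    lines = Path(template_path).read_text().splitlines(keepends=True)
    edit_lines(lines)  # callable that fills in the template placeholders
    out = Path(output_path)
    out.parent.mkdir(parents=True, exist_ok=True)
    # write_text truncates an existing file, so no exists()/remove() pair.
    out.write_text(''.join(lines))

Each write_*_file body would then shrink to building the edit callable, and
the file handles are closed implicitly.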
diff --git a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
index 92c4b411c..9b69d2e41 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/AddBuildingForces.py
@@ -1,30 +1,30 @@
-#!/usr/bin/env python
+#!/usr/bin/env python # noqa: EXE001, D100
import argparse
import json
import os
-def validateCaseDirectoryStructure(caseDir):
+def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803
"""This method validates that the provided case directory is valid and contains the 0, constant and system directory
It also checks that system directory contains the controlDict
- """
- if not os.path.isdir(caseDir):
+ """ # noqa: D205, D400, D401, D404
+ if not os.path.isdir(caseDir): # noqa: PTH112
return False
- caseDirList = os.listdir(caseDir)
- necessaryDirs = ['0', 'constant', 'system']
+ caseDirList = os.listdir(caseDir) # noqa: N806
+ necessaryDirs = ['0', 'constant', 'system'] # noqa: N806
if any(aDir not in caseDirList for aDir in necessaryDirs):
return False
- controlDictPath = os.path.join(caseDir, 'system/controlDict')
- if not os.path.exists(controlDictPath):
+ controlDictPath = os.path.join(caseDir, 'system/controlDict') # noqa: PTH118, N806
+ if not os.path.exists(controlDictPath): # noqa: SIM103, PTH110
return False
return True
-def findFunctionsDictionary(controlDictLines):
- """This method will find functions dictionary in the controlDict"""
+def findFunctionsDictionary(controlDictLines): # noqa: N802, N803
+ """This method will find functions dictionary in the controlDict""" # noqa: D400, D401, D404
for line in controlDictLines:
if line.startswith('functions'):
return (True, controlDictLines.index(line) + 2)
@@ -32,13 +32,13 @@ def findFunctionsDictionary(controlDictLines):
return [False, len(controlDictLines)]
-def writeForceDictionary(controlDictLines, lineIndex, floorsCount, patches):
- """This method will write the force dictionary"""
+def writeForceDictionary(controlDictLines, lineIndex, floorsCount, patches): # noqa: N802, N803
+ """This method will write the force dictionary""" # noqa: D400, D401, D404
for line in ['\t\n', '\tbuildingsForces\n', '\t{\n', '\t}\n', '\n']:
controlDictLines.insert(lineIndex, line)
- lineIndex += 1
+ lineIndex += 1 # noqa: N806
- forceDictionary = {
+ forceDictionary = { # noqa: N806
'type': 'forces',
'libs': '("libforces.so")',
'writeControl': 'timeStep',
@@ -50,17 +50,17 @@ def writeForceDictionary(controlDictLines, lineIndex, floorsCount, patches):
'CofR': '(0 0 0)',
}
- lineIndex -= 2
+ lineIndex -= 2 # noqa: N806
for key, value in forceDictionary.items():
controlDictLines.insert(lineIndex, '\t\t' + key + '\t' + str(value) + ';\n')
- lineIndex += 1
+ lineIndex += 1 # noqa: N806
for line in ['\n', '\t\tbinData\n', '\t\t{\n', '\t\t}\n', '\n']:
controlDictLines.insert(lineIndex, line)
- lineIndex += 1
+ lineIndex += 1 # noqa: N806
- lineIndex -= 2
- binDictionary = {
+ lineIndex -= 2 # noqa: N806
+ binDictionary = { # noqa: N806
'nBin': str(floorsCount),
'direction': '(0 0 1)',
'cumulative': 'no',
@@ -70,38 +70,38 @@ def writeForceDictionary(controlDictLines, lineIndex, floorsCount, patches):
controlDictLines.insert(
lineIndex, '\t\t\t' + key + '\t' + str(value) + ';\n'
)
- lineIndex += 1
+ lineIndex += 1 # noqa: N806
-def AddBuildingsForces(floorsCount, patches):
- """First, we need to validate the case directory structure"""
+def AddBuildingsForces(floorsCount, patches): # noqa: N802, N803
+ """First, we need to validate the case directory structure""" # noqa: D400
# if not validateCaseDirectoryStructure(caseDir):
# print("Invalid OpenFOAM Case Directory!")
# sys.exit(-1)
# controlDictPath = os.path.join(caseDir, "system/controlDict")
- controlDictPath = 'system/controlDict'
- with open(controlDictPath) as controlDict:
- controlDictLines = controlDict.readlines()
+ controlDictPath = 'system/controlDict' # noqa: N806
+ with open(controlDictPath) as controlDict: # noqa: PTH123, N806
+ controlDictLines = controlDict.readlines() # noqa: N806
- [isFound, lineIndex] = findFunctionsDictionary(controlDictLines)
+ [isFound, lineIndex] = findFunctionsDictionary(controlDictLines) # noqa: N806
# If we cannot find the function dictionary, we will create one
if not isFound:
for line in ['\n', 'functions\n', '{\n', '}\n']:
controlDictLines.insert(lineIndex, line)
- lineIndex += 1
+ lineIndex += 1 # noqa: N806
# Now we can add the building forces
writeForceDictionary(controlDictLines, lineIndex, floorsCount, patches)
# Writing updated controlDict
- with open(controlDictPath, 'w') as controlDict:
+ with open(controlDictPath, 'w') as controlDict: # noqa: PTH123, N806
controlDict.writelines(controlDictLines)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath) as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath) as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
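The SIM103 suppression in validateCaseDirectoryStructure marks the
if/return-False/return-True tail that could return the condition directly, and
the PTH codes mark the os.path calls. A minimal pathlib sketch of the same
check under those two suggestions (the function name is illustrative):

from pathlib import Path

def case_directory_is_valid(case_dir):
    """Check for the 0, constant and system directories plus controlDict."""
    case = Path(case_dir)
    if not case.is_dir():
        return False
    names = {entry.name for entry in case.iterdir()}
    if not {'0', 'constant', 'system'}.issubset(names):
        return False
    # SIM103: return the final condition directly instead of if/else.
    return (case / 'system' / 'controlDict').exists()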
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
index 4c5c63680..e661faa8b 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClaw.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -50,7 +50,7 @@ class GeoClaw:
-------
decomptext: Get all the text for the decomposeParDict
- """
+ """ # noqa: D205, D400, D404
#############################################################
def creategeom(self, data, path):
@@ -60,7 +60,7 @@ def creategeom(self, data, path):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -68,7 +68,7 @@ def creategeom(self, data, path):
swcfdfile = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'SWCFDInteFile'])
)
- swcfdfilepath = os.path.join(path, swcfdfile)
+ swcfdfilepath = os.path.join(path, swcfdfile) # noqa: PTH118
swcfdpoints = np.genfromtxt(
swcfdfilepath, delimiter=',', dtype=(float, float)
)
@@ -78,6 +78,6 @@ def creategeom(self, data, path):
minvalues = np.min(swcfdpoints, axis=0)
# Points of interest
- bottompts = self.getbathy(maxvalues, minvalues, data)
+ bottompts = self.getbathy(maxvalues, minvalues, data) # noqa: F841
return 0
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
index f7fab304d..023cc89c3 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawBathy.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,18 +47,18 @@ class GeoClawBathy:
-------
creategeom: Create geometry and STL files
- """
+ """ # noqa: D205, D400, D404
#############################################################
- def creategeom(self, data, path):
+ def creategeom(self, data, path): # noqa: ARG002
"""Creates the geometry for bathymetry
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
- hydroutil = hydroUtils()
+ hydroutil = hydroUtils() # noqa: F841
return 0
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GeoClawOpenFOAM.py b/modules/createEVENT/GeoClawOpenFOAM/GeoClawOpenFOAM.py
index 1ed84731f..493e430e0 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GeoClawOpenFOAM.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GeoClawOpenFOAM.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications.
@@ -38,8 +38,8 @@
import argparse
-def main(inputFile, evtFile, getRV):
- print('Finished GeoClawOpenFOAM application')
+def main(inputFile, evtFile, getRV): # noqa: ARG001, N803, D103
+ print('Finished GeoClawOpenFOAM application') # noqa: T201
if __name__ == '__main__':
diff --git a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
index c139ed6a3..744b4b743 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/GetOpenFOAMEvent.py
@@ -1,37 +1,37 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import os
import re
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def validateCaseDirectoryStructure(caseDir):
+def validateCaseDirectoryStructure(caseDir): # noqa: N802, N803
"""This method validates that the provided case directory is valid and contains the 0, constant and system directory
It also checks that system directory contains the controlDict
- """
- if not os.path.isdir(caseDir):
+ """ # noqa: D205, D400, D401, D404
+ if not os.path.isdir(caseDir): # noqa: PTH112
return False
- caseDirList = os.listdir(caseDir)
- necessaryDirs = ['0', 'constant', 'system', 'postProcessing']
+ caseDirList = os.listdir(caseDir) # noqa: N806
+ necessaryDirs = ['0', 'constant', 'system', 'postProcessing'] # noqa: N806
if any(aDir not in caseDirList for aDir in necessaryDirs):
return False
- controlDictPath = os.path.join(caseDir, 'system/controlDict')
- if not os.path.exists(controlDictPath):
+ controlDictPath = os.path.join(caseDir, 'system/controlDict') # noqa: PTH118, N806
+ if not os.path.exists(controlDictPath): # noqa: SIM103, PTH110
return False
return True
-def parseForceComponents(forceArray):
-    """This method takes the OpenFOAM force array and parses it into x, y, z components"""
+def parseForceComponents(forceArray): # noqa: N802, N803
+    """This method takes the OpenFOAM force array and parses it into x, y, z components""" # noqa: D400, D401, D404
components = forceArray.strip('()').split()
x = float(components[0])
y = float(components[1])
@@ -39,33 +39,33 @@ def parseForceComponents(forceArray):
return [x, y, z]
-def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime):
-    """This method reads the forces from the post-processing output files of the OpenFOAM case"""
-    deltaT = 0
+def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime): # noqa: N802, N803
+    """This method reads the forces from the post-processing output files of the OpenFOAM case""" # noqa: D400, D401, D404
+    deltaT = 0 # noqa: N806
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
- forcePattern = re.compile(r'\([0-9.e\+\-\s]+\)')
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
+ forcePattern = re.compile(r'\([0-9.e\+\-\s]+\)') # noqa: N806
- with open(buildingForcesPath) as forcesFile:
- forceLines = forcesFile.readlines()
- needsDeltaT = True
+ with open(buildingForcesPath) as forcesFile: # noqa: PTH123, N806
+ forceLines = forcesFile.readlines() # noqa: N806
+ needsDeltaT = True # noqa: N806
for line in forceLines:
if line.startswith('#'):
continue
- elif needsDeltaT:
- deltaT = float(line.split()[0])
- needsDeltaT = False
+ elif needsDeltaT: # noqa: RET507
+ deltaT = float(line.split()[0]) # noqa: N806
+ needsDeltaT = False # noqa: N806
t = float(line.split()[0])
if t > startTime:
- detectedForces = re.findall(forcePattern, line)
+ detectedForces = re.findall(forcePattern, line) # noqa: N806
for i in range(floorsCount):
# Read the different force types (pressure, viscous and porous!)
- pressureForce = detectedForces[6 * i]
- viscousForce = detectedForces[6 * i + 1]
- porousForce = detectedForces[6 * i + 2]
+ pressureForce = detectedForces[6 * i] # noqa: N806
+ viscousForce = detectedForces[6 * i + 1] # noqa: N806
+ porousForce = detectedForces[6 * i + 2] # noqa: N806
# Parse force components
[fprx, fpry, fprz] = parseForceComponents(pressureForce)
@@ -80,28 +80,28 @@ def ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime):
return [deltaT, forces]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(
- timeSeriesArray,
- patternsArray,
+def addFloorForceToEvent( # noqa: N802
+ timeSeriesArray, # noqa: N803
+ patternsArray, # noqa: N803
force,
direction,
floor,
- dT,
+ dT, # noqa: N803
):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WaterForceSeries_' + str(floor) + direction
- timeSeries = {'name': seriesName, 'dT': dT, 'type': 'Value', 'data': force}
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WaterForceSeries_' + str(floor) + direction # noqa: N806
+ timeSeries = {'name': seriesName, 'dT': dT, 'type': 'Value', 'data': force} # noqa: N806
timeSeriesArray.append(timeSeries)
- patternName = 'WaterForcePattern_' + str(floor) + direction
+ patternName = 'WaterForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -113,19 +113,19 @@ def addFloorForceToEvent(
patternsArray.append(pattern)
-def addFloorPressure(pressureArray, floor):
- """Add floor pressure in the event file"""
- floorPressure = {'story': str(floor), 'pressure': [0.0, 0.0]}
+def addFloorPressure(pressureArray, floor): # noqa: N802, N803
+ """Add floor pressure in the event file""" # noqa: D400
+ floorPressure = {'story': str(floor), 'pressure': [0.0, 0.0]} # noqa: N806
pressureArray.append(floorPressure)
-def writeEVENT(forces, deltaT):
- """This method writes the EVENT.json file"""
- timeSeriesArray = []
- patternsArray = []
- pressureArray = []
- waterEventJson = {
+def writeEVENT(forces, deltaT): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ timeSeriesArray = [] # noqa: N806
+ patternsArray = [] # noqa: N806
+ pressureArray = [] # noqa: N806
+ waterEventJson = { # noqa: N806
'type': 'Hydro',
'subtype': 'OpenFOAM CFD Hydro Event',
'timeSeries': timeSeriesArray,
@@ -137,10 +137,10 @@ def writeEVENT(forces, deltaT):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [waterEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [waterEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(
timeSeriesArray, patternsArray, floorForces.X, 'X', floor, deltaT
@@ -150,33 +150,33 @@ def writeEVENT(forces, deltaT):
)
addFloorPressure(pressureArray, floor)
- with open('EVENT.json', 'w') as eventsFile:
+ with open('EVENT.json', 'w') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetOpenFOAMEvent(floorsCount, startTime):
- """Read OpenFOAM output and generate an EVENT file for the building"""
- forcesOutputName = 'buildingsForces'
+def GetOpenFOAMEvent(floorsCount, startTime): # noqa: N802, N803
+ """Read OpenFOAM output and generate an EVENT file for the building""" # noqa: D400
+ forcesOutputName = 'buildingsForces' # noqa: N806
if floorsCount == 1:
- buildingForcesPath = os.path.join(
+ buildingForcesPath = os.path.join( # noqa: PTH118, N806
'postProcessing', forcesOutputName, '0', 'forces.dat'
)
else:
- buildingForcesPath = os.path.join(
+ buildingForcesPath = os.path.join( # noqa: PTH118, N806
'postProcessing', forcesOutputName, '0', 'forces_bins.dat'
)
- [deltaT, forces] = ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime)
+ [deltaT, forces] = ReadOpenFOAMForces(buildingForcesPath, floorsCount, startTime) # noqa: N806
# Write the EVENT file
writeEVENT(forces, deltaT)
- print('OpenFOAM event is written to EVENT.json')
+ print('OpenFOAM event is written to EVENT.json') # noqa: T201
-def ReadBIM(BIMFilePath):
- with open(BIMFilePath) as BIMFile:
+def ReadBIM(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath) as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return [
@@ -197,6 +197,6 @@ def ReadBIM(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- [floors, startTime] = ReadBIM(arguments.bim)
+ [floors, startTime] = ReadBIM(arguments.bim) # noqa: N816
GetOpenFOAMEvent(floors, startTime)
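The regular expression and the strip/split parser in GetOpenFOAMEvent.py can
be exercised in isolation; below is a small round-trip against an invented
forces.dat record (one floor bin, six parenthesised vectors, matching the
detectedForces[6 * i] indexing used above):

import re

force_pattern = re.compile(r'\([0-9.e\+\-\s]+\)')

# Invented record: time stamp, then six (fx fy fz)-style groups for one bin.
line = '0.05 ((1.2e+01 0.3 -4.5) (0.1 0.0 0.0) (0 0 0) (0 0 0) (0 0 0) (0 0 0))'
groups = force_pattern.findall(line)          # innermost parenthesised vectors
fx, fy, fz = (float(c) for c in groups[0].strip('()').split())
print(len(groups), fx, fy, fz)                # 6 12.0 0.3 -4.5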
diff --git a/modules/createEVENT/GeoClawOpenFOAM/Processor.py b/modules/createEVENT/GeoClawOpenFOAM/Processor.py
index 502d46c2a..8c619f52a 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/Processor.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/Processor.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -18,7 +18,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -42,7 +42,7 @@
####################################################################
# Main function
####################################################################
-def main():
+def main(): # noqa: C901
"""This is the primary function
Objects:
@@ -53,7 +53,7 @@ def main():
Variables:
fipath: Path to dakota.json
- """
+ """ # noqa: D400, D401, D404
# Get the system argument
# Create a parser Object
h2oparser = argparse.ArgumentParser(description='Get the Dakota.json file')
@@ -111,7 +111,7 @@ def main():
fipath = str(fipath)
# Open the JSON file and load all objects
- with open(args.b) as f:
+ with open(args.b) as f: # noqa: PTH123
data = json.load(f)
# Create a utilities object
@@ -122,7 +122,7 @@ def main():
projname = ', '.join(projname)
# Initialize a log ID number
- logID = 0
+ logID = 0 # noqa: N806
# Initialize the log
hydroutil.hydrolog(projname, fipath)
@@ -130,9 +130,9 @@ def main():
# Start the log file with header and time and date
logfiletext = hydroutil.general_header()
hydroutil.flog.write(logfiletext)
- logID += 1
+ logID += 1 # noqa: N806
hydroutil.flog.write(
- '%d (%s): This log has started.\n' % (logID, datetime.datetime.now())
+ '%d (%s): This log has started.\n' % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Get the simulation type
@@ -142,7 +142,7 @@ def main():
if int(simtype) == 0:
hydroutil.flog.write(
'%d (%s): No simulation type selected in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('No simulation type selected in EVT.')
@@ -160,7 +160,7 @@ def main():
solver = openfoam7()
elif int(hydrosolver) == 1:
- print('This is not yet available')
+ print('This is not yet available') # noqa: T201
# OpenFoam 8 + olaFlow
# solver = openfoam8()
@@ -171,151 +171,151 @@ def main():
# Call the important routines
# Create folders and files
ecode = solver.createfolder(data, fipath, args)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error creating folders required for EVT solver.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error creating folders required for EVT solver.')
else:
hydroutil.flog.write(
'%d (%s): Folders required for EVT solver created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Create Geometry
ecode = solver.creategeometry(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error creating geometry required for EVT solver.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error creating geometry required for EVT solver')
else:
hydroutil.flog.write(
'%d (%s): Geometry required for EVT solver created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Create meshing
ecode = solver.createmesh(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode == 0:
hydroutil.flog.write(
'%d (%s): Files required for EVT meshing created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
else:
hydroutil.flog.write(
'%d (%s): Error in Files required for EVT meshing.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Material
ecode = solver.materials(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with material parameters in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with material parameters in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Files required for materials definition successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Create initial condition
ecode = solver.initial(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with initial condition definition in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Issues with definition of initial condition in EVT')
else:
hydroutil.flog.write(
'%d (%s): Files required for initial condition definition successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Create boundary condition - to do (alpha, k, omega, nut, nuTilda)
ecode = solver.boundary(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with boundary condition definition in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Issues with definition of boundary condition in EVT')
else:
hydroutil.flog.write(
'%d (%s): Files required for boundary condition definition successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Turbulence
ecode = solver.turbulence(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with turbulence parameters in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with turbulence parameters in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Files required for turbulence definition successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Parallelization
ecode = solver.parallelize(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with parallelization parameters in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with parallelization parameters in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Files required for parallelization successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Solver settings
ecode = solver.solve(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with solver parameters in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with solver parameters in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Files required for solver successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Other files
ecode = solver.others(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with creating auxiliary files in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with creating auxiliary files in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Auxiliary files required successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Dakota scripts
@@ -323,17 +323,17 @@ def main():
# Event post processing
ecode = solver.postprocessing(data, fipath)
- logID += 1
+ logID += 1 # noqa: N806
if ecode < 0:
hydroutil.flog.write(
'%d (%s): Error with creating postprocessing files in EVT.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
sys.exit('Error with creating postprocessing files in EVT.')
else:
hydroutil.flog.write(
'%d (%s): Postprocessing files required for EVT successfully created.\n'
- % (logID, datetime.datetime.now())
+ % (logID, datetime.datetime.now()) # noqa: DTZ005
)
# Cleaning scripts
@@ -341,7 +341,7 @@ def main():
# Write to caserun file
caseruntext = 'echo HydroUQ complete'
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
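
The DTZ005 suppressions above all mark one pattern: datetime.datetime.now() called without a timezone, so the log stamps depend on whatever the host clock is set to. A minimal sketch of a timezone-aware helper that would let these noqa comments (and the N806 ones on logID) be retired; `flog` stands in for the hydroutil log handle used above:

    import datetime

    def write_log(flog, log_id, msg):
        # tz-aware timestamp, so Ruff's DTZ005 no longer fires
        stamp = datetime.datetime.now(tz=datetime.timezone.utc)
        flog.write('%d (%s): %s\n' % (log_id, stamp, msg))

Each error/success branch above then reduces to a single write_log() call.
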
diff --git a/modules/createEVENT/GeoClawOpenFOAM/flume.py b/modules/createEVENT/GeoClawOpenFOAM/flume.py
index 69002cd69..2fcd048f1 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/flume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/flume.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -53,7 +53,7 @@ class flume:
generateflume: Create STL files for the flume
extremedata: Get the extreme values and building information
- """
+ """ # noqa: D400, D404
#############################################################
def generateflume(self, breadth, path):
@@ -64,7 +64,7 @@ def generateflume(self, breadth, path):
breadth: Breadth of the flume
path: Path where dakota.json exists - where we need to put STL files
- """
+ """ # noqa: D400, D401
# Get the triangulated flume
extremeval = self.flumedata('FlumeData.txt')
self.breadth = breadth
@@ -113,14 +113,14 @@ def generateflume(self, breadth, path):
return extremeval
#############################################################
- def flumedata(self, IpPTFile):
+ def flumedata(self, IpPTFile): # noqa: N803
"""Gets information about the flume to create STL files
Arguments:
---------
IpPTFile: File with points of the flume
- """
+ """ # noqa: D400, D401
# Get the data for the boundary
data_boun = np.genfromtxt(IpPTFile, delimiter=',', dtype=(float, float))
@@ -132,7 +132,7 @@ def flumedata(self, IpPTFile):
)
# Initialize segments for left and right
- segmentLR = []
+ segmentLR = [] # noqa: N806
# Loop over all coordinates and create coordinates
for ii in range(data_boun.shape[0]):
@@ -143,8 +143,8 @@ def flumedata(self, IpPTFile):
segmentLR.extend([(ii, 0)])
# Triangulate the polygon
- ALR = dict(vertices=data_boun, segments=segmentLR)
- BLR = tr.triangulate(ALR)
+ ALR = dict(vertices=data_boun, segments=segmentLR) # noqa: C408, N806
+ BLR = tr.triangulate(ALR) # noqa: N806
# Get the triangles and vertices
nm_triangle = BLR['triangles'].tolist()
@@ -162,10 +162,10 @@ def flumedata(self, IpPTFile):
n0 = self.npt[ii, 0]
n1 = self.npt[ii, 1]
n2 = self.npt[ii, 2]
- centroidX = (1 / 3) * (
+ centroidX = (1 / 3) * ( # noqa: N806
self.npa[n0, 0] + self.npa[n1, 0] + self.npa[n2, 0]
)
- centroidZ = (1 / 3) * (
+ centroidZ = (1 / 3) * ( # noqa: N806
self.npa[n0, 1] + self.npa[n1, 1] + self.npa[n2, 1]
)
po = Point(centroidX, centroidZ)
@@ -188,7 +188,7 @@ def right(self):
---------
none
- """
+ """ # noqa: D400, D401
self.npa_right = np.zeros(shape=(self.npa.shape[0], 3))
self.npa_right[:, 0] = self.npa[:, 0]
self.npa_right[:, 2] = self.npa[:, 1]
@@ -202,7 +202,7 @@ def left(self):
---------
none
- """
+ """ # noqa: D400, D401
self.npa_left = np.zeros(shape=(self.npa.shape[0], 3))
self.npa_left[:, 0] = self.npa[:, 0]
self.npa_left[:, 2] = self.npa[:, 1]
@@ -216,7 +216,7 @@ def lefttri(self):
---------
none
- """
+ """ # noqa: D400
self.npt_left = np.array(self.npt)
self.npt_left[:, [1, 0]] = self.npt_left[:, [0, 1]]
@@ -228,7 +228,7 @@ def front(self):
---------
none
- """
+ """ # noqa: D400
self.npa_front = np.zeros(shape=(4, 3))
self.npa_front[0, :] = self.npa_right[0, :]
self.npa_front[1, :] = self.npa_right[self.npa_right.shape[0] - 1, :]
@@ -243,7 +243,7 @@ def fronttri(self):
---------
none
- """
+ """ # noqa: D400
self.npt_front = np.array([[0, 1, 2], [1, 3, 2]])
####################################################################
@@ -254,7 +254,7 @@ def back(self):
---------
none
- """
+ """ # noqa: D400
self.npa_back = np.zeros(shape=(4, 3))
self.npa_back[0, :] = self.npa_right[self.npa_right.shape[0] - 3, :]
self.npa_back[1, :] = self.npa_right[self.npa_right.shape[0] - 2, :]
@@ -269,7 +269,7 @@ def backtri(self):
---------
none
- """
+ """ # noqa: D400
self.npt_back = np.array([[3, 1, 0], [0, 2, 3]])
####################################################################
@@ -280,7 +280,7 @@ def top(self):
---------
none
- """
+ """ # noqa: D400
self.npa_top = np.zeros(shape=(4, 3))
self.npa_top[0, :] = self.npa_right[self.npa_right.shape[0] - 1, :]
self.npa_top[1, :] = self.npa_right[self.npa_right.shape[0] - 2, :]
@@ -295,7 +295,7 @@ def toptri(self):
---------
none
- """
+ """ # noqa: D400
self.npt_top = np.array([[2, 0, 1], [2, 1, 3]])
####################################################################
@@ -306,7 +306,7 @@ def bottom(self):
---------
none
- """
+ """ # noqa: D400
# Create the coordinate vector
self.npa_bottom = []
@@ -341,7 +341,7 @@ def bottomtri(self):
---------
none
- """
+ """ # noqa: D400
# Create the coordinate vector
self.npt_bottom = []
ntri = 2
@@ -361,7 +361,7 @@ def bottomtri(self):
self.npt_bottom = np.concatenate((self.npt_bottom, npt_temp), axis=0)
#############################################################
- def writeSTL(self, base_filename, npa, npt, path):
+ def writeSTL(self, base_filename, npa, npt, path): # noqa: N802
"""Write the STL files for each patch
Arguments:
@@ -371,23 +371,23 @@ def writeSTL(self, base_filename, npa, npt, path):
npt: List of triangles
path: Location where dakota.json file exists
- """
+ """ # noqa: D400
# Create a filename
filename = base_filename + '.stl'
# Create the STL file
cells = [('triangle', npt)]
meshio.write_points_cells(filename, npa, cells)
# Modify first and last line
- with open(filename) as f:
+ with open(filename) as f: # noqa: PTH123
lines = f.readlines()
lines[0] = 'solid ' + base_filename + '\n'
lines[len(lines) - 1] = 'endsolid ' + base_filename + '\n'
# Write the updated file
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
f.writelines(lines)
# Move the file to constant/triSurface folder
- newfilepath = os.path.join(path, 'constant', 'triSurface', filename)
- os.replace(filename, newfilepath)
+ newfilepath = os.path.join(path, 'constant', 'triSurface', filename) # noqa: PTH118
+ os.replace(filename, newfilepath) # noqa: PTH105
#############################################################
def extremedata(self, extreme, breadth):
@@ -399,20 +399,20 @@ def extremedata(self, extreme, breadth):
extreme: Maximum limits
breadth: Breadth of the flume
- """
+ """ # noqa: D400, D401
# Write the Max-Min values for the blockMesh
- BMXmin = extreme[0] - 0.25 * (extreme[1] - extreme[0])
- BMXmax = extreme[1] + 0.25 * (extreme[1] - extreme[0])
- BMYmin = -0.625 * breadth
- BMYmax = 0.625 * breadth
- BMZmin = extreme[2] - 0.25 * (extreme[3] - extreme[2])
- BMZmax = extreme[3] + 0.25 * (extreme[3] - extreme[2])
+ BMXmin = extreme[0] - 0.25 * (extreme[1] - extreme[0]) # noqa: N806
+ BMXmax = extreme[1] + 0.25 * (extreme[1] - extreme[0]) # noqa: N806
+ BMYmin = -0.625 * breadth # noqa: N806
+ BMYmax = 0.625 * breadth # noqa: N806
+ BMZmin = extreme[2] - 0.25 * (extreme[3] - extreme[2]) # noqa: N806
+ BMZmax = extreme[3] + 0.25 * (extreme[3] - extreme[2]) # noqa: N806
# Write the temporary file
filename = 'temp_geometry.txt'
- if os.path.exists(filename):
- os.remove(filename)
- tempfileID = open('temp_geometry.txt', 'w')
+ if os.path.exists(filename): # noqa: PTH110
+ os.remove(filename) # noqa: PTH107
+ tempfileID = open('temp_geometry.txt', 'w') # noqa: SIM115, PTH123, N806
# Write the extreme values to the files
tempfileID.write(
@@ -429,6 +429,6 @@ def extremedata(self, extreme, breadth):
+ str(BMZmax)
+ '\n'
)
- tempfileID.close
+ tempfileID.close # noqa: B018
return 0
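
One suppression in extremedata papers over a real bug rather than a style nit: `tempfileID.close` (B018) is a bare attribute access, not a call, so the temp file is never explicitly closed. A sketch of the same write with the B018, SIM115, and PTH hits resolved, where `bounds` stands in for the BMXmin..BMZmax values computed above:

    from pathlib import Path

    def write_extremes(bounds):
        geom = Path('temp_geometry.txt')
        if geom.exists():          # replaces os.path.exists (PTH110)
            geom.unlink()          # replaces os.remove (PTH107)
        with geom.open('w') as f:  # PTH123; also closes the file even on error (SIM115)
            f.write('\n'.join(str(b) for b in bounds) + '\n')
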
diff --git a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
index a166208de..4ff877888 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/hydroUtils.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -48,10 +48,10 @@ class hydroUtils:
hydrolog: Initializes the log file
general_header: Creates the header for the Hydro-UQ files
- """
+ """ # noqa: D205, D404
#############################################################
- def extract(self, obj, path, ind, arr):
+ def extract(self, obj, path, ind, arr): # noqa: C901
"""Extracts an element from a nested dictionary
along a specified path and returns a list.
@@ -62,11 +62,11 @@ def extract(self, obj, path, ind, arr):
ind: An int - starting index
arr: A list - output list
- """
+ """ # noqa: D205, D401
key = path[ind]
if ind + 1 < len(path):
if isinstance(obj, dict):
- if key in obj.keys():
+ if key in obj.keys(): # noqa: SIM118
self.extract(obj.get(key), path, ind + 1, arr)
else:
arr.append(None)
@@ -104,13 +104,13 @@ def extract_element_from_json(self, obj, path):
obj: A list or dict - input dictionary or list of dictionaries
path: A list - list of strings that form the path to the desired element
- """
- if isinstance(obj, dict):
+ """ # noqa: D205, D401
+ if isinstance(obj, dict): # noqa: RET503
return self.extract(obj, path, 0, [])
- elif isinstance(obj, list):
+ elif isinstance(obj, list): # noqa: RET505
outer_arr = []
for item in obj:
- outer_arr.append(self.extract(item, path, 0, []))
+ outer_arr.append(self.extract(item, path, 0, [])) # noqa: PERF401
return outer_arr
#############################################################
@@ -120,25 +120,25 @@ def general_header(self):
Variables
-----------
header: Stores the general header for the Hydro-UQ files
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
|======| D | Website: simcenter.designsafe-ci.org/research-tools/hydro-uq
| | R | Version: 1.00
| | O |
-\\*---------------------------------------------------------------------------*/ \n\n"""
+\\*---------------------------------------------------------------------------*/ \n\n""" # noqa: W291
- return header
+ return header # noqa: RET504
####################################################################
- def of7header(self, OFclass, location, filename):
+ def of7header(self, OFclass, location, filename): # noqa: N803
"""Method to create a header for the input dictionaries.
Variables
-----------
header: Header text for the file being created
- """
+ """ # noqa: D401
header = rf"""/*--------------------------*- NHERI SimCenter -*----------------------------*\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -154,9 +154,9 @@ class {OFclass};
location "{location}";
object {filename};
}}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
- return header
+ return header # noqa: RET504
#############################################################
def hydrolog(self, projname, fipath):
@@ -171,7 +171,7 @@ def hydrolog(self, projname, fipath):
-----------
flog: File pointer to the log file
- """
+ """ # noqa: D400, D401
# Open a log file to write the outputs
# Use project name for the log file
# If no project name is specified, call it Untitled
@@ -181,8 +181,8 @@ def hydrolog(self, projname, fipath):
fname = 'Untitled.h20log'
# Path to the file
- filepath = os.path.join(fipath, fname)
- self.flog = open(filepath, 'w')
+ filepath = os.path.join(fipath, fname) # noqa: PTH118
+ self.flog = open(filepath, 'w') # noqa: SIM115, PTH123
#############################################################
def getlist(self, data):
@@ -192,7 +192,7 @@ def getlist(self, data):
---------
data: String of numbers to be converted to a list
- """
+ """ # noqa: D400, D401
# results = []
# for line in data:
@@ -210,4 +210,4 @@ def getlist(self, data):
data = data.replace(',', ' ')
results = [float(n) for n in data.split()]
- return results
+ return results # noqa: RET504
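
For context on the lookups used throughout this diff: extract_element_from_json walks a nested dict along a list of keys and returns a list, and judging by its callers a missing key yields [None], which is why every check below reads `== [None]`. A hypothetical call with dakota.json-style data:

    hydroutil = hydroUtils()
    data = {'Events': {'SimulationType': '4'}}
    hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
    # -> ['4']
    hydroutil.extract_element_from_json(data, ['Events', 'FlumeSegments'])
    # -> [None], the sentinel the callers test against
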
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
index e4ae06539..7d01c94ad 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Alpboundary.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,58 +47,58 @@ class of7Alpboundary:
-------
Alptext: Get all the text for the alpha.water file
- """
+ """ # noqa: D205, D404
#############################################################
- def Alptext(self, data, patches):
+ def Alptext(self, data, patches): # noqa: N802
"""Creates the necessary text for pressure bc for openfoam7
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Get the header text for the alpha file
- Alptext = self.Alpheader()
+ Alptext = self.Alpheader() # noqa: N806
# Start the outside
- Alptext = Alptext + 'boundaryField\n{\n'
+ Alptext = Alptext + 'boundaryField\n{\n' # noqa: N806
# Loop over all patches
for patchname in patches:
- Alptext = Alptext + '\t' + patchname + '\n'
+ Alptext = Alptext + '\t' + patchname + '\n' # noqa: N806
patch = hydroutil.extract_element_from_json(
data, ['Events', 'PressureType_' + patchname]
)
if patch == [None]:
- Alptype = -1
+ Alptype = -1 # noqa: N806
else:
- Alptype = 0
- Alptext = Alptext + self.Alppatchtext(Alptype, patchname)
+ Alptype = 0 # noqa: N806
+ Alptext = Alptext + self.Alppatchtext(Alptype, patchname) # noqa: N806
# Check for building and other building
- Alptext = Alptext + '\tBuilding\n'
- Alptext = Alptext + self.Alppatchtext(0, 'Building')
- Alptext = Alptext + '\tOtherBuilding\n'
- Alptext = Alptext + self.Alppatchtext(0, 'OtherBuilding')
+ Alptext = Alptext + '\tBuilding\n' # noqa: N806
+ Alptext = Alptext + self.Alppatchtext(0, 'Building') # noqa: N806
+ Alptext = Alptext + '\tOtherBuilding\n' # noqa: N806
+ Alptext = Alptext + self.Alppatchtext(0, 'OtherBuilding') # noqa: N806
# Close the outside
- Alptext = Alptext + '}\n\n'
+ Alptext = Alptext + '}\n\n' # noqa: N806
# Return the text for the alpha BC
- return Alptext
+ return Alptext # noqa: RET504
#############################################################
- def Alpheader(self):
+ def Alpheader(self): # noqa: N802
"""Creates the text for the header for pressure file
Variable
-----------
header: Header for the alpha.water file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -108,16 +108,16 @@ def Alpheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tvolScalarField;\n\tlocation\t"0";\n\tobject\talpha.water;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
header = header + 'dimensions\t[0 0 0 0 0 0 0];\n\n'
header = header + 'internalField\tuniform\t0;\n\n'
# Return the header for the alpha file
- return header
+ return header # noqa: RET504
#############################################################
- def Alppatchtext(self, Alptype, patchname):
+ def Alppatchtext(self, Alptype, patchname): # noqa: ARG002, N802, N803
"""Creates the text the pressure boundary condition
Arguments:
@@ -128,16 +128,16 @@ def Alppatchtext(self, Alptype, patchname):
-----------
Alptext: Text for the particular patch
- """
+ """ # noqa: D400, D401
if patchname == 'Top':
- Alptext = '\t{\n\t\t'
- Alptext = Alptext + 'type\tinletOutlet;\n\t\t'
- Alptext = Alptext + 'inletValue\tuniform 0;\n\t\t'
- Alptext = Alptext + 'value\tuniform 0;\n\t}\n'
+ Alptext = '\t{\n\t\t' # noqa: N806
+ Alptext = Alptext + 'type\tinletOutlet;\n\t\t' # noqa: N806
+ Alptext = Alptext + 'inletValue\tuniform 0;\n\t\t' # noqa: N806
+ Alptext = Alptext + 'value\tuniform 0;\n\t}\n' # noqa: N806
else:
- Alptext = '\t{\n\t\t'
- Alptext = Alptext + 'type\tzeroGradient;\n\t}\n'
+ Alptext = '\t{\n\t\t' # noqa: N806
+ Alptext = Alptext + 'type\tzeroGradient;\n\t}\n' # noqa: N806
# Return the text for the patch
return Alptext
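
The dense run of N806 suppressions in this class comes from building dictionary text through repeated `Alptext = Alptext + ...` reassignments. A sketch of how Alppatchtext could shed both the suppressions and the unused Alptype argument (ARG002) while emitting the same text:

    def alppatchtext(patchname):
        # 'Top' gets an inletOutlet alpha condition; all other patches zeroGradient
        if patchname == 'Top':
            return ('\t{\n\t\ttype\tinletOutlet;\n\t\t'
                    'inletValue\tuniform 0;\n\t\tvalue\tuniform 0;\n\t}\n')
        return '\t{\n\t\ttype\tzeroGradient;\n\t}\n'
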
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
index f7ca152d6..2c586616b 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Building.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -52,10 +52,10 @@ class of7Building:
buildcheck: Checks if all files required for creating the building exist
createbuilds: Creates the STL files
- """
+ """ # noqa: D205, D404
#############################################################
- def buildcheck(self, data, path):
+ def buildcheck(self, data, path): # noqa: C901, PLR0911
"""Checks if all files required for creating the building exists
Arguments:
@@ -63,14 +63,14 @@ def buildcheck(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Check if a translate script exists.
# If so delete it
- if os.path.exists('translate.sh'):
- os.remove('translate.sh')
+ if os.path.exists('translate.sh'): # noqa: PTH110
+ os.remove('translate.sh') # noqa: PTH107
# Check for STL file
# Get the type of building definition
@@ -95,22 +95,22 @@ def buildcheck(self, data, path):
builddata = builddata.replace(',', ' ')
nums = [float(n) for n in builddata.split()]
buildtype = nums[0]
- if int(buildtype) == -1 or int(buildtype) == 2:
+ if int(buildtype) == -1 or int(buildtype) == 2: # noqa: PLR2004
stlfile = hydroutil.extract_element_from_json(
data, ['Events', 'BuildingSTLFile']
)
if stlfile == [None]:
return -1
- else:
+ else: # noqa: RET505
stlfile = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'BuildingSTLFile']
)
)
- if not os.path.exists(os.path.join(path, stlfile)):
+ if not os.path.exists(os.path.join(path, stlfile)): # noqa: PTH110, PTH118
return -1
- if int(buildtype) == -2 or int(buildtype) == -1:
+ if int(buildtype) == -2 or int(buildtype) == -1: # noqa: PLR2004
numbuildres += 1
# Check GI
depth = hydroutil.extract_element_from_json(
@@ -171,23 +171,23 @@ def buildcheck(self, data, path):
)
if int(buildshape) == 0:
return -1
- elif int(buildshape) == 1:
+ elif int(buildshape) == 1: # noqa: RET505
stlfile = hydroutil.extract_element_from_json(
data, ['Events', 'BuildingSTLFile']
)
if stlfile == [None]:
return -1
- else:
+ else: # noqa: RET505
stlfile = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'BuildingSTLFile']
)
)
- if not os.path.exists(os.path.join(path, stlfile)):
+ if not os.path.exists(os.path.join(path, stlfile)): # noqa: PTH110, PTH118
return -1
# Check if building distribution selected
- buildDist = ', '.join(
+ buildDist = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'BuildDist'])
)
if int(buildDist) == 0:
@@ -204,7 +204,7 @@ def createbuilds(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -229,7 +229,7 @@ def buildmanual(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -255,7 +255,7 @@ def buildmanual(self, data, path):
nums = [float(n) for n in builddata.split()]
buildtype = nums[0]
- if int(buildtype) == -2:
+ if int(buildtype) == -2: # noqa: PLR2004
# Create a temporary file using GI information (Response)
self.buildcubeGI(data, path)
# Increment response building number
@@ -266,14 +266,14 @@ def buildmanual(self, data, path):
# Increment response building number
numresbuild += 1
elif int(buildtype) == 1:
- print('no response + cuboid')
+ print('no response + cuboid') # noqa: T201
# Create a temporary file
# Call flume to build an STL
# Combine all STL to building + number
# Increment other building number
numotherbuild += 1
- elif int(buildtype) == 2:
- print('no response + STL')
+ elif int(buildtype) == 2: # noqa: PLR2004
+ print('no response + STL') # noqa: T201
# Check if STL file exists
# Increment other building number
numotherbuild += 1
@@ -284,7 +284,7 @@ def buildmanual(self, data, path):
self.buildflagadd(numresbuild, numotherbuild)
#############################################################
- def buildpara(self, data, path):
+ def buildpara(self, data, path): # noqa: ARG002
"""Creates the STL files for the buildings using parametrized data
Arguments:
@@ -292,12 +292,12 @@ def buildpara(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
- hydroutil = hydroUtils()
+ hydroutil = hydroUtils() # noqa: F841
#############################################################
- def buildcubeGI(self, data, path):
+ def buildcubeGI(self, data, path): # noqa: ARG002, N802
"""Creates the STL files for the buildings using parametrized data
Arguments:
@@ -305,9 +305,9 @@ def buildcubeGI(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
- hydroutil = hydroUtils()
+ hydroutil = hydroUtils() # noqa: F841
# Create the building STL file
base_filename = 'Building'
@@ -354,28 +354,28 @@ def buildcubeGI(self, data, path):
cells = [('triangle', npt)]
meshio.write_points_cells(filename, npa, cells)
# Modify first and last line
- with open(filename) as f:
+ with open(filename) as f: # noqa: PTH123
lines = f.readlines()
lines[0] = 'solid ' + base_filename + '\n'
lines[len(lines) - 1] = 'endsolid ' + base_filename + '\n'
# Write the updated file
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
f.writelines(lines)
# Create the translation script
- if os.path.exists('translate.sh'):
- with open('translate.sh', 'a') as f:
- buildpath = os.path.join('constant', 'triSurface', 'Building.stl')
+ if os.path.exists('translate.sh'): # noqa: PTH110
+ with open('translate.sh', 'a') as f: # noqa: PTH123
+ buildpath = os.path.join('constant', 'triSurface', 'Building.stl') # noqa: PTH118
lines = 'cp Building.stl ' + buildpath + '\n'
f.writelines(lines)
else:
- with open('translate.sh', 'w') as f:
- buildpath = os.path.join('constant', 'triSurface', 'Building.stl')
+ with open('translate.sh', 'w') as f: # noqa: PTH123
+ buildpath = os.path.join('constant', 'triSurface', 'Building.stl') # noqa: PTH118
lines = 'cp Building.stl ' + buildpath + '\n'
f.writelines(lines)
#############################################################
- def readResSTL(self, data, path, ztrans):
+ def readResSTL(self, data, path, ztrans): # noqa: N802
"""Creates the STL files for the buildings using parametrized data
Arguments:
@@ -384,7 +384,7 @@ def readResSTL(self, data, path, ztrans):
path: Path to where the dakota.json exists
ztrans: Translation distance in z-direction
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -394,8 +394,8 @@ def readResSTL(self, data, path, ztrans):
)
# Read the stlfile
- stlfilepath = os.path.join(path, stlfile)
- print(stlfilepath)
+ stlfilepath = os.path.join(path, stlfile) # noqa: PTH118
+ print(stlfilepath) # noqa: T201
mesh = meshio.read(stlfilepath, file_format='stl')
mesh.points[:, 0] = mesh.points[:, 0] / (max(abs(mesh.points[:, 0])))
@@ -436,23 +436,23 @@ def readResSTL(self, data, path, ztrans):
meshio.write_points_cells('Building.stl', mesh.points, mesh.cells)
# Modify first and last line
- with open('Building.stl') as f:
+ with open('Building.stl') as f: # noqa: PTH123
lines = f.readlines()
lines[0] = 'solid ' + 'Building' + '\n'
lines[len(lines) - 1] = 'endsolid ' + 'Building' + '\n'
# Write the updated file
- with open('Building.stl', 'w') as f:
+ with open('Building.stl', 'w') as f: # noqa: PTH123
f.writelines(lines)
# Move the file to constant/triSurface folder
- newfilepath = os.path.join(path, 'constant', 'triSurface', 'Building.stl')
- os.replace('Building.stl', newfilepath)
+ newfilepath = os.path.join(path, 'constant', 'triSurface', 'Building.stl') # noqa: PTH118
+ os.replace('Building.stl', newfilepath) # noqa: PTH105
# Create the translation script
- if os.path.exists('translate.sh'):
- with open('translate.sh', 'a') as f:
- buildpath = os.path.join('constant', 'triSurface', 'Building.stl')
+ if os.path.exists('translate.sh'): # noqa: PTH110
+ with open('translate.sh', 'a') as f: # noqa: PTH123
+ buildpath = os.path.join('constant', 'triSurface', 'Building.stl') # noqa: PTH118
lines = 'export FILE="' + buildpath + '"\n'
lines = (
lines
@@ -466,8 +466,8 @@ def readResSTL(self, data, path, ztrans):
)
f.writelines(lines)
else:
- with open('translate.sh', 'w') as f:
- buildpath = os.path.join('constant', 'triSurface', 'Building.stl')
+ with open('translate.sh', 'w') as f: # noqa: PTH123
+ buildpath = os.path.join('constant', 'triSurface', 'Building.stl') # noqa: PTH118
lines = 'export FILE="' + buildpath + '"\n'
lines = (
lines
@@ -490,7 +490,7 @@ def buildflagadd(self, numresbuild, numotherbuild):
numresbuild: Number of buildings with response
numotherbuild: Number of other buildings
- """
+ """ # noqa: D400
# Get building flag
if numresbuild == 0 and numotherbuild == 0:
flag = 0
@@ -502,5 +502,5 @@ def buildflagadd(self, numresbuild, numotherbuild):
flag = 3
# Add building flag to temp file
- with open('temp_geometry.txt', 'a') as f:
+ with open('temp_geometry.txt', 'a') as f: # noqa: PTH123
f.writelines(str(flag) + '\n')
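
In both buildcubeGI and readResSTL the exists/append and create/write branches for translate.sh share their entire body; since mode 'a' creates the file when it is absent, one branch suffices. A pathlib sketch that also clears the PTH110/PTH118/PTH123 hits:

    from pathlib import Path

    buildpath = Path('constant') / 'triSurface' / 'Building.stl'
    with Path('translate.sh').open('a') as f:  # 'a' creates the file if needed
        f.write(f'cp Building.stl {buildpath}\n')
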
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Dakota.py b/modules/createEVENT/GeoClawOpenFOAM/of7Dakota.py
index 98575f0ea..1624a2f78 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Dakota.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Dakota.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Dakota:
-------
scripts: Generate relevant scripts
- """
+ """ # noqa: D205, D404
#############################################################
def dakotascripts(self, args):
@@ -58,7 +58,7 @@ def dakotascripts(self, args):
args: all the arguments (the JSON data and the path where dakota.json is located)
- """
+ """ # noqa: D400
caseruntext = 'echo Starting Dakota preparation...\n'
caseruntext = (
caseruntext
@@ -72,21 +72,21 @@ def dakotascripts(self, args):
caseruntext = caseruntext + 'rm -fr 0\n'
caseruntext = caseruntext + 'mkdir EVTfiles\n'
caseruntext = (
- caseruntext + 'mv 0.org ' + os.path.join('EVTfiles', '0.org') + '\n'
+ caseruntext + 'mv 0.org ' + os.path.join('EVTfiles', '0.org') + '\n' # noqa: PTH118
)
caseruntext = (
caseruntext
+ 'mv constant '
- + os.path.join('EVTfiles', 'constant')
+ + os.path.join('EVTfiles', 'constant') # noqa: PTH118
+ '\n'
)
caseruntext = (
- caseruntext + 'mv system ' + os.path.join('EVTfiles', 'system') + '\n'
+ caseruntext + 'mv system ' + os.path.join('EVTfiles', 'system') + '\n' # noqa: PTH118
)
caseruntext = (
caseruntext
+ 'mv postProcessing '
- + os.path.join('EVTfiles', 'postProcessing')
+ + os.path.join('EVTfiles', 'postProcessing') # noqa: PTH118
+ '\n'
)
caseruntext = caseruntext + 'mv *.log EVTfiles\n'
@@ -98,20 +98,20 @@ def dakotascripts(self, args):
caseruntext = caseruntext + 'rm -fr EVTfiles\n\n'
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
#############################################################
- def cleaning(self, args, path):
+ def cleaning(self, args, path): # noqa: ARG002
"""Create the scripts for cleaning
Arguments:
---------
args: all the arguments
- """
- print('No OF cleaning')
+ """ # noqa: D400
+ print('No OF cleaning') # noqa: T201
# # tar -c -f trial.tar $(readlink -e a b c d)
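
The open/write/close sequence on caserun.sh recurs across of7Dakota, of7Decomp, of7Geometry, of7Initial, and of7Meshing, collecting SIM115 and PTH123 each time. A small helper (a sketch, not part of the current modules) would centralize both the pattern and the fix:

    from pathlib import Path

    def append_caserun(text, script='caserun.sh'):
        # the context manager closes the handle, satisfying SIM115
        with Path(script).open('a') as f:
            f.write(text)
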
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
index 2f4710041..e89b53037 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Decomp.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Decomp:
-------
decomptext: Get all the text for the decomposeParDict
- """
+ """ # noqa: D205, D404
#############################################################
def decomptext(self, data):
@@ -57,7 +57,7 @@ def decomptext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -75,7 +75,7 @@ def decomptext(self, data):
decomptext = decomptext + 'method\tscotch;\n\n'
- return decomptext
+ return decomptext # noqa: RET504
#############################################################
def decompheader(self):
@@ -84,7 +84,7 @@ def decompheader(self):
Variable
-----------
header: Header for the decomposeParDict-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -94,13 +94,13 @@ def decompheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\tdecomposeParDict;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
# Return the header for the decomposeParDict file
- return header
+ return header # noqa: RET504
#############################################################
- def scripts(self, data, path):
+ def scripts(self, data, path): # noqa: ARG002
"""Create the scripts for caserun.sh
Arguments:
@@ -108,7 +108,7 @@ def scripts(self, data, path):
data: all the JSON data
path: Path where dakota.json file is located
- """
+ """ # noqa: D400
# Create a utilities object
hydroutil = hydroUtils()
@@ -134,7 +134,7 @@ def scripts(self, data, path):
caseruntext = (
caseruntext + 'echo Starting CFD simulation in parallel...\n'
)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
caseruntext = (
caseruntext
+ 'ibrun -n '
@@ -151,12 +151,12 @@ def scripts(self, data, path):
else:
caseruntext = 'echo Starting CFD simulation in serial...\n'
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
caseruntext = caseruntext + 'olaDyMFlow > olaDyMFlow.log\n\n'
else:
caseruntext = caseruntext + 'olaFlow > olaFlow.log\n\n'
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
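
decomptext renders the scotch decomposition dictionary that decomposePar reads. With the RET504 suppression removed, the final concatenation can simply be returned; a sketch, assuming the subdomain count has already been extracted from the JSON (decomposeParDict also requires numberOfSubdomains):

    def decomptext_tail(header, nsubdomains):
        # returning the expression directly retires the RET504 noqa
        return header + 'numberOfSubdomains\t' + nsubdomains + ';\n\nmethod\tscotch;\n\n'
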
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
index 8557f7ed5..acbe366c6 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Geometry.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -55,10 +55,10 @@ class of7Geometry:
geomcheck: Checks if all files required for creating the geometry exist
createSTL: Creates the STL files
- """
+ """ # noqa: D205, D404
#############################################################
- def geomcheck(self, data, path):
+ def geomcheck(self, data, path): # noqa: C901, PLR0911
"""Checks if all files required for creating the geometry exists
Arguments:
@@ -66,7 +66,7 @@ def geomcheck(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -76,14 +76,14 @@ def geomcheck(self, data, path):
)
# Simtype: Multiscale with SW solutions
- if int(simtype) == 1 or int(simtype) == 2:
+ if int(simtype) == 1 or int(simtype) == 2: # noqa: PLR2004
# Get the number of bathymetry files
numbathy = hydroutil.extract_element_from_json(
data, ['Events', 'NumBathymetryFiles']
)
if numbathy == [None]:
return -1
- else:
+ else: # noqa: RET505
numbathy = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'NumBathymetryFiles']
@@ -99,14 +99,14 @@ def geomcheck(self, data, path):
)
if bathyfilename == [None]:
return -1
- else:
+ else: # noqa: RET505
bathyfilename = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'BathymetryFile' + str(ii)]
)
)
bathyfilepath = os.path.join(path, bathyfilename)
- if not os.path.isfile(bathyfilepath):
+ if not os.path.isfile(bathyfilepath): # noqa: PTH113
return -1
if int(simtype) == 1:
@@ -116,7 +116,7 @@ def geomcheck(self, data, path):
)
if numsoln == [None]:
return -1
- else:
+ else: # noqa: RET505
numsoln = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'NumSolutionFiles']
@@ -132,14 +132,14 @@ def geomcheck(self, data, path):
)
if solnfilename == [None]:
return -1
- else:
+ else: # noqa: RET505
solnfilename = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'SWSolutionFile' + str(ii)]
)
)
solnfilepath = os.path.join(path, solnfilename)
- if not os.path.isfile(solnfilepath):
+ if not os.path.isfile(solnfilepath): # noqa: PTH113
return -1
# Check the SW-CFD interface file
@@ -148,50 +148,50 @@ def geomcheck(self, data, path):
)
if swcfdfile == [None]:
return -1
- else:
+ else: # noqa: RET505
swcfdfile = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'SWCFDInteFile']
)
)
swcfdfilepath = os.path.join(path, swcfdfile)
- if not os.path.isfile(swcfdfilepath):
+ if not os.path.isfile(swcfdfilepath): # noqa: PTH113
return -1
# STL file
- elif int(simtype) == 3:
+ elif int(simtype) == 3: # noqa: PLR2004
# Entry.stl
entrypath = os.path.join(path, 'Entry.stl')
- if not os.path.isfile(entrypath):
+ if not os.path.isfile(entrypath): # noqa: PTH113
return -1
# Exit.stl
exitpath = os.path.join(path, 'Exit.stl')
- if not os.path.isfile(exitpath):
+ if not os.path.isfile(exitpath): # noqa: PTH113
return -1
# Top.stl
toppath = os.path.join(path, 'Top.stl')
- if not os.path.isfile(toppath):
+ if not os.path.isfile(toppath): # noqa: PTH113
return -1
# Bottom.stl
bottompath = os.path.join(path, 'Bottom.stl')
- if not os.path.isfile(bottompath):
+ if not os.path.isfile(bottompath): # noqa: PTH113
return -1
# Left.stl
leftpath = os.path.join(path, 'Left.stl')
- if not os.path.isfile(leftpath):
+ if not os.path.isfile(leftpath): # noqa: PTH113
return -1
# Right.stl
rightpath = os.path.join(path, 'Right.stl')
- if not os.path.isfile(rightpath):
+ if not os.path.isfile(rightpath): # noqa: PTH113
return -1
# Wave flume
- elif int(simtype) == 4:
+ elif int(simtype) == 4: # noqa: PLR2004
# Get the flume type
flumetype = ', '.join(
hydroutil.extract_element_from_json(
@@ -207,13 +207,13 @@ def geomcheck(self, data, path):
)
if numsegs == [None]:
return -1
- else:
+ else: # noqa: RET505
numsegs = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'NumFlumeSegments']
)
)
- if int(numsegs) < 4:
+ if int(numsegs) < 4: # noqa: PLR2004
return -1
flumesegs = hydroutil.extract_element_from_json(
data, ['Events', 'FlumeSegments']
@@ -227,7 +227,7 @@ def geomcheck(self, data, path):
return 0
#############################################################
- def createOFSTL(self, data, path):
+ def createOFSTL(self, data, path): # noqa: C901, N802
"""Creates the STL files
Arguments:
@@ -235,7 +235,7 @@ def createOFSTL(self, data, path):
data: all the JSON data
path: Path to where the dakota.json exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -253,18 +253,18 @@ def createOFSTL(self, data, path):
return -1
# Bathymetry only
- elif int(simtype) == 2:
- print('Bathy')
+ elif int(simtype) == 2: # noqa: PLR2004
+ print('Bathy') # noqa: T201
finalgeom = GeoClawBathy()
# Create geometry (i.e. STL files) and extreme file
ecode = finalgeom.creategeom(data, path)
if ecode < 0:
return -1
- elif int(simtype) == 3:
+ elif int(simtype) == 3: # noqa: PLR2004
return 0
- elif int(simtype) == 4:
+ elif int(simtype) == 4: # noqa: PLR2004
# Get the flume type
flumetype = ', '.join(
hydroutil.extract_element_from_json(
@@ -298,7 +298,7 @@ def scripts(self, data):
---------
NONE
- """
+ """ # noqa: D400
# Create a utilities object
hydroutil = hydroUtils()
@@ -308,14 +308,14 @@ def scripts(self, data):
)
# Combine STL files for Hydro mesh or using mesh dict
- if int(mesher[0]) == 0 or int(mesher[0]) == 2:
+ if int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: PLR2004
# Get building flag from temp-geometry file
geofile = 'temp_geometry.txt'
data_geoext = np.genfromtxt(geofile, dtype=(float))
flag = int(data_geoext[6])
# If translate file exists, use it
- if os.path.exists('translate.sh'):
+ if os.path.exists('translate.sh'): # noqa: PTH110
caseruntext = 'echo Translating building STL files...\n'
caseruntext = caseruntext + 'chmod +x translate.sh\n'
caseruntext = caseruntext + './translate.sh\n\n'
@@ -324,14 +324,14 @@ def scripts(self, data):
caseruntext = 'echo Combining STL files for usage...\n'
# Join all paths
- entryf = os.path.join('constant', 'triSurface', 'Entry.stl')
- exitf = os.path.join('constant', 'triSurface', 'Exit.stl')
- topf = os.path.join('constant', 'triSurface', 'Top.stl')
- bottomf = os.path.join('constant', 'triSurface', 'Bottom.stl')
- leftf = os.path.join('constant', 'triSurface', 'Left.stl')
- rightf = os.path.join('constant', 'triSurface', 'Right.stl')
- buildingf = os.path.join('constant', 'triSurface', 'Building.stl')
- otherbuildingf = os.path.join(
+ entryf = os.path.join('constant', 'triSurface', 'Entry.stl') # noqa: PTH118
+ exitf = os.path.join('constant', 'triSurface', 'Exit.stl') # noqa: PTH118
+ topf = os.path.join('constant', 'triSurface', 'Top.stl') # noqa: PTH118
+ bottomf = os.path.join('constant', 'triSurface', 'Bottom.stl') # noqa: PTH118
+ leftf = os.path.join('constant', 'triSurface', 'Left.stl') # noqa: PTH118
+ rightf = os.path.join('constant', 'triSurface', 'Right.stl') # noqa: PTH118
+ buildingf = os.path.join('constant', 'triSurface', 'Building.stl') # noqa: PTH118
+ otherbuildingf = os.path.join( # noqa: PTH118
'constant', 'triSurface', 'OtherBuilding.stl'
)
all01 = (
@@ -348,7 +348,7 @@ def scripts(self, data):
+ ' '
+ rightf
)
- full = os.path.join('constant', 'triSurface', 'Full.stl')
+ full = os.path.join('constant', 'triSurface', 'Full.stl') # noqa: PTH118
# For different building cases
if flag == 0:
@@ -357,7 +357,7 @@ def scripts(self, data):
caseruntext = (
caseruntext + all01 + ' ' + buildingf + ' > ' + full + '\n\n'
)
- elif flag == 2:
+ elif flag == 2: # noqa: PLR2004
caseruntext = (
caseruntext
+ all01
@@ -369,7 +369,7 @@ def scripts(self, data):
+ full
+ '\n\n'
)
- elif flag == 3:
+ elif flag == 3: # noqa: PLR2004
caseruntext = (
caseruntext
+ all01
@@ -380,6 +380,6 @@ def scripts(self, data):
+ '\n\n'
)
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
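
Most PLR2004 suppressions in geomcheck and createOFSTL guard comparisons like `int(simtype) == 4`. Naming the simulation types would make the branches self-documenting; the values below are inferred from the branch comments in this file (a sketch):

    from enum import IntEnum

    class SimType(IntEnum):
        MULTISCALE_SW = 1  # SW solutions + bathymetry
        BATHYMETRY = 2     # bathymetry only
        STL = 3            # user-supplied STL files
        WAVE_FLUME = 4     # parametrized wave flume

    # e.g.  if int(simtype) == SimType.WAVE_FLUME: ...
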
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
index de495270b..82b12159c 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Initial.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -48,7 +48,7 @@ class of7Initial:
-------
alphatext: Get all the text for the setFieldsDict
- """
+ """ # noqa: D205, D404
#############################################################
def alphatext(self, data, fipath):
@@ -58,7 +58,7 @@ def alphatext(self, data, fipath):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -73,8 +73,8 @@ def alphatext(self, data, fipath):
# Read the values
if int(simtype) == 1:
fname = 'SWAlpha.txt'
- swalphafile = os.path.join(fipath, fname)
- with open(swalphafile) as f:
+ swalphafile = os.path.join(fipath, fname) # noqa: PTH118
+ with open(swalphafile) as f: # noqa: PTH123
gloalpha, localalpha, x1, y1, z1, x2, y2, z2 = (
float(x) for x in next(f).split(',')
)
@@ -173,7 +173,7 @@ def alphatext(self, data, fipath):
alphatext = alphatext + '\n);'
- return alphatext
+ return alphatext # noqa: RET504
#############################################################
def alphaheader(self):
@@ -182,7 +182,7 @@ def alphaheader(self):
Variable
-----------
header: Header for the setFields-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -192,10 +192,10 @@ def alphaheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\tsetFieldsDict;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
# Return the header for the setFieldsDict file
- return header
+ return header # noqa: RET504
#############################################################
def alphacheck(self, data, fipath):
@@ -205,7 +205,7 @@ def alphacheck(self, data, fipath):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -218,8 +218,8 @@ def alphacheck(self, data, fipath):
if simtype == 1:
# Check for the file exists
fname = 'SWAlpha.txt'
- swalphafile = os.path.join(fipath, fname)
- if not os.path.exists(swalphafile):
+ swalphafile = os.path.join(fipath, fname) # noqa: PTH118
+ if not os.path.exists(swalphafile): # noqa: PTH110
return -1
# For all types other than the shallow water
@@ -237,7 +237,7 @@ def alphacheck(self, data, fipath):
)
if numreg == [None]:
return -1
- else:
+ else: # noqa: RET505
numreg = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'NumAlphaRegion']
@@ -256,7 +256,7 @@ def alphacheck(self, data, fipath):
)
if region == [None]:
return -1
- else:
+ else: # noqa: RET505
region = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'InitialAlphaRegion' + str(ii)]
@@ -266,14 +266,14 @@ def alphacheck(self, data, fipath):
# Convert the regions to list of floats
nums = [float(n) for n in regsegs.split()]
# Check if 6 coordinates + 1 alpha number
- if len(nums) != 7:
+ if len(nums) != 7: # noqa: PLR2004
return -1
# Return 0 if all is right
return 0
#############################################################
- def scripts(self, data, path):
+ def scripts(self, data, path): # noqa: ARG002
"""Create the scripts for caserun.sh
Arguments:
@@ -281,12 +281,12 @@ def scripts(self, data, path):
data: all the JSON data
path: Path where dakota.json file is located
- """
+ """ # noqa: D400
# Setfields
caseruntext = 'echo Setting fields...\n'
caseruntext = caseruntext + 'setFields > setFields.log\n\n'
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
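
alphatext expects the first line of SWAlpha.txt to hold exactly eight comma-separated floats; judging by the names, a global and a local alpha value followed by two corner coordinates. A self-contained illustration of the parse (values purely illustrative):

    from io import StringIO

    f = StringIO('0.0, 1.0, -10.0, -5.0, 0.0, 10.0, 5.0, 2.5\n')
    gloalpha, localalpha, x1, y1, z1, x2, y2, z2 = (
        float(x) for x in next(f).split(',')
    )
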
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
index 8869c0d6f..b81c87fd9 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Materials.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Materials:
-------
mattext: Get all the text for the transportProperties
- """
+ """ # noqa: D205, D404
#############################################################
def mattext(self, data):
@@ -57,7 +57,7 @@ def mattext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -120,7 +120,7 @@ def mattext(self, data):
mattext = mattext + 'sigma\t[1 0 -2 0 0 0 0]\t' + sigma + ';\n'
- return mattext
+ return mattext # noqa: RET504
#############################################################
def matheader(self):
@@ -129,7 +129,7 @@ def matheader(self):
Variable
-----------
header: Header for the transportProp-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -139,10 +139,10 @@ def matheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"constant";\n\tobject\ttransportProperties;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
# Return the header for the transportProperties file
- return header
+ return header # noqa: RET504
#############################################################
def matcheck(self, data):
@@ -152,7 +152,7 @@ def matcheck(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
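
The seven-entry brackets written by mattext are OpenFOAM dimension sets: exponents over the SI base quantities, in the order shown below. sigma's [1 0 -2 0 0 0 0] therefore reads kg/s^2, i.e. N/m, the unit of surface tension:

    # dimensions  [ kg   m   s   K  mol   A  cd ]
    # sigma       [  1   0  -2   0   0    0   0 ]   -> kg/s^2 == N/m
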
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
index fd5d8fbb4..9e6a6298d 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Meshing.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -51,7 +51,7 @@ class of7Meshing:
-------
meshcheck: Check all the meshing
- """
+ """ # noqa: D205, D404
#############################################################
def meshcheck(self, data, fipath):
@@ -61,7 +61,7 @@ def meshcheck(self, data, fipath):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -75,28 +75,28 @@ def meshcheck(self, data, fipath):
return 0
# Other mesh software
- elif int(mesher[0]) == 1:
+ elif int(mesher[0]) == 1: # noqa: RET505
meshfile = hydroutil.extract_element_from_json(
data, ['Events', 'MeshFile']
)
if meshfile == [None]:
return -1
- else:
+ else: # noqa: RET505
meshfile = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'MeshFile'])
)
- meshfilepath = os.path.join(fipath, meshfile)
- if not os.path.isfile(meshfilepath):
+ meshfilepath = os.path.join(fipath, meshfile) # noqa: PTH118
+ if not os.path.isfile(meshfilepath): # noqa: PTH113
return -1
# Mesh dictionaries
- elif int(mesher[0]) == 2:
+ elif int(mesher[0]) == 2: # noqa: PLR2004
# Get path of bm and shm
- bmfile = os.path.join(fipath, 'blockMeshDict')
- shmfile = os.path.join(fipath, 'snappyHexMeshDict')
+ bmfile = os.path.join(fipath, 'blockMeshDict') # noqa: PTH118
+ shmfile = os.path.join(fipath, 'snappyHexMeshDict') # noqa: PTH118
# Check if both blockMeshDict and snappyHexMeshDict are missing
- if (not os.path.isfile(bmfile)) and (not os.path.isfile(shmfile)):
+ if (not os.path.isfile(bmfile)) and (not os.path.isfile(shmfile)): # noqa: PTH113
return -1
# Return 0 if all is right
@@ -109,7 +109,7 @@ def meshheader(self, fileobjec):
Variable
-----------
header: Header for the mesh dictionary files
- """
+ """ # noqa: D400, D401
header = (
"""/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
@@ -119,14 +119,14 @@ def meshheader(self, fileobjec):
| | O |
\\*---------------------------------------------------------------------------*/
FoamFile
-{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\t"""
+{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\t""" # noqa: W291
+ fileobjec
+ """;\n}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
)
# Return the header for meshing file
- return header
+ return header # noqa: RET504
#############################################################
def bmeshtext(self, data):
@@ -136,7 +136,7 @@ def bmeshtext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Read the geometry data file
data_geoext = np.genfromtxt('temp_geometry.txt', dtype=(float))
@@ -148,7 +148,7 @@ def bmeshtext(self, data):
# Get the mesh sizes
nx = 100 * int(meshsize)
- if abs(data_geoext[1] - data_geoext[0]) > 0.000001:
+ if abs(data_geoext[1] - data_geoext[0]) > 0.000001: # noqa: PLR2004
ny = math.ceil(
5
* nx
@@ -284,7 +284,7 @@ def bmeshtext(self, data):
# Add merge patch pairs
bmeshtext = bmeshtext + 'mergePatchPairs\n(\n);\n'
- return bmeshtext
+ return bmeshtext # noqa: RET504
#############################################################
def sfetext(self):
@@ -294,7 +294,7 @@ def sfetext(self):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Read the geometry data file
data_geoext = np.genfromtxt('temp_geometry.txt', dtype=(float))
@@ -314,10 +314,10 @@ def sfetext(self):
sfetext = sfetext + 'Right.stl\n' + stlinfo + '\n\n'
if int(data_geoext[6]) == 1:
sfetext = sfetext + 'Building.stl\n' + stlinfo + '\n\n'
- elif int(data_geoext[6]) == 2:
+ elif int(data_geoext[6]) == 2: # noqa: PLR2004
sfetext = sfetext + 'Building.stl\n' + stlinfo + '\n\n'
sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n'
- elif int(data_geoext[6]) == 3:
+ elif int(data_geoext[6]) == 3: # noqa: PLR2004
sfetext = sfetext + 'OtherBuilding.stl\n' + stlinfo + '\n\n'
return sfetext
@@ -330,7 +330,7 @@ def shmtext(self, data):
---------
None
- """
+ """ # noqa: D400, D401
# Read the geometry data file
data_geoext = np.genfromtxt('temp_geometry.txt', dtype=(float))
@@ -360,7 +360,7 @@ def shmtext(self, data):
shmtext = (
shmtext + '\tBuilding.stl {type triSurfaceMesh; name Building;}\n'
)
- elif int(data_geoext[6]) == 2:
+ elif int(data_geoext[6]) == 2: # noqa: PLR2004
shmtext = (
shmtext + '\tBuilding.stl {type triSurfaceMesh; name Building;}\n'
)
@@ -368,7 +368,7 @@ def shmtext(self, data):
shmtext
+ '\tOtherBuilding.stl {type triSurfaceMesh; name OtherBuilding;}\n'
)
- elif int(data_geoext[6]) == 3:
+ elif int(data_geoext[6]) == 3: # noqa: PLR2004
shmtext = (
shmtext
+ '\tOtherBuilding.stl {type triSurfaceMesh; name OtherBuilding;}\n'
@@ -377,8 +377,8 @@ def shmtext(self, data):
shmtext = shmtext + '};\n\n'
# Castellated mesh generation
- maxLocalCells = int(meshsize) * 2000000
- maxGlobalCells = int(meshsize) * 10000000
+ maxLocalCells = int(meshsize) * 2000000 # noqa: N806
+ maxGlobalCells = int(meshsize) * 10000000 # noqa: N806
shmtext = shmtext + 'castellatedMeshControls\n{\n\t'
shmtext = shmtext + 'maxLocalCells\t' + str(maxLocalCells) + ';\n\t'
shmtext = shmtext + 'maxGlobalCells\t' + str(maxGlobalCells) + ';\n\t'
@@ -396,10 +396,10 @@ def shmtext(self, data):
shmtext = shmtext + '{file "Right.eMesh"; level 3;}\n'
if int(data_geoext[6]) == 1:
shmtext = shmtext + '\t\t{file "Building.eMesh"; level 3;}\n'
- elif int(data_geoext[6]) == 2:
+ elif int(data_geoext[6]) == 2: # noqa: PLR2004
shmtext = shmtext + '\t\t{file "Building.eMesh"; level 3;}\n'
shmtext = shmtext + '\t\t{file "OtherBuilding.eMesh"; level 3;}\n'
- elif int(data_geoext[6]) == 3:
+ elif int(data_geoext[6]) == 3: # noqa: PLR2004
shmtext = shmtext + '\t\t{file "OtherBuilding.eMesh"; level 3;}\n'
shmtext = shmtext + '\t);\n\n'
@@ -413,10 +413,10 @@ def shmtext(self, data):
shmtext = shmtext + 'Right {level (2 2);}\n'
if int(data_geoext[6]) == 1:
shmtext = shmtext + '\t\tBuilding {level (2 2);}\n'
- elif int(data_geoext[6]) == 2:
+ elif int(data_geoext[6]) == 2: # noqa: PLR2004
shmtext = shmtext + '\t\tBuilding {level (2 2);}\n'
shmtext = shmtext + '\t\tOtherBuilding {level (2 2);}\n'
- elif int(data_geoext[6]) == 3:
+ elif int(data_geoext[6]) == 3: # noqa: PLR2004
shmtext = shmtext + '\t\tOtherBuilding {level (2 2);}\n'
shmtext = shmtext + '\t};\n\n'
@@ -505,10 +505,10 @@ def shmtext(self, data):
shmtext = shmtext + 'debug\t0;\n'
shmtext = shmtext + 'mergeTolerance\t1E-6;\n'
- return shmtext
+ return shmtext # noqa: RET504
#############################################################
- def scripts(self, data, path):
+ def scripts(self, data, path): # noqa: C901
"""Create the scripts for caserun.sh
Arguments:
@@ -516,7 +516,7 @@ def scripts(self, data, path):
data: all the JSON data
path: Path where dakota.json file is located
- """
+ """ # noqa: D400
# Create a utilities object
hydroutil = hydroUtils()
@@ -538,7 +538,7 @@ def scripts(self, data, path):
caseruntext = caseruntext + 'echo snappyHexMesh running...\n'
caseruntext = caseruntext + 'snappyHexMesh > snappyHexMesh.log\n'
# Copy polyMesh folder
- path2c = os.path.join('2', 'polyMesh')
+ path2c = os.path.join('2', 'polyMesh') # noqa: PTH118
caseruntext = caseruntext + 'cp -r ' + path2c + ' constant\n'
caseruntext = caseruntext + 'rm -fr 1 2\n\n'
@@ -569,34 +569,34 @@ def scripts(self, data, path):
caseruntext = (
caseruntext + 'ideasToFoam $MESHFILE > ideasToFoam.log\n\n'
)
- elif int(meshsoftware[0]) == 2:
+ elif int(meshsoftware[0]) == 2: # noqa: PLR2004
caseruntext = (
caseruntext + 'cfx4ToFoam $MESHFILE > cfx4ToFoam.log\n\n'
)
- elif int(meshsoftware[0]) == 3:
+ elif int(meshsoftware[0]) == 3: # noqa: PLR2004
caseruntext = (
caseruntext + 'gambitToFoam $MESHFILE > gambitToFoam.log\n\n'
)
- elif int(meshsoftware[0]) == 4:
+ elif int(meshsoftware[0]) == 4: # noqa: PLR2004
caseruntext = (
caseruntext + 'gmshToFoam $MESHFILE > gmshToFoam.log\n\n'
)
- elif int(mesher[0]) == 2:
+ elif int(mesher[0]) == 2: # noqa: PLR2004
# COPY THE FILES TO THE RIGHT LOCATION
            caseruntext = caseruntext + 'echo Copying mesh dictionaries...\n'
# blockMesh
- bmfile = os.path.join(path, 'blockMeshDict')
- if os.path.isfile(bmfile):
- bmfilenew = os.path.join('system', 'blockMeshDict')
+ bmfile = os.path.join(path, 'blockMeshDict') # noqa: PTH118
+ if os.path.isfile(bmfile): # noqa: PTH113
+ bmfilenew = os.path.join('system', 'blockMeshDict') # noqa: PTH118
caseruntext = caseruntext + 'cp ' + bmfile + ' ' + bmfilenew + '\n'
caseruntext = caseruntext + 'echo blockMesh running...\n'
caseruntext = caseruntext + 'blockMesh > blockMesh.log\n\n'
# surfaceFeatureExtract
- sfdfile = os.path.join(path, 'surfaceFeatureExtractDict')
- if os.path.isfile(sfdfile):
- sfdfilenew = os.path.join('system', 'surfaceFeatureExtractDict')
+ sfdfile = os.path.join(path, 'surfaceFeatureExtractDict') # noqa: PTH118
+ if os.path.isfile(sfdfile): # noqa: PTH113
+ sfdfilenew = os.path.join('system', 'surfaceFeatureExtractDict') # noqa: PTH118
caseruntext = caseruntext + 'cp ' + sfdfile + ' ' + sfdfilenew + '\n'
caseruntext = caseruntext + 'echo surfaceFeatureExtract running...\n'
caseruntext = (
@@ -605,13 +605,13 @@ def scripts(self, data, path):
)
# snappyHexMesh
- shmfile = os.path.join(path, 'snappyHexMeshDict')
- if os.path.isfile(shmfile):
- shmfilenew = os.path.join('system', 'snappyHexMeshDict')
+ shmfile = os.path.join(path, 'snappyHexMeshDict') # noqa: PTH118
+ if os.path.isfile(shmfile): # noqa: PTH113
+ shmfilenew = os.path.join('system', 'snappyHexMeshDict') # noqa: PTH118
caseruntext = caseruntext + 'cp ' + shmfile + ' ' + shmfilenew + '\n'
caseruntext = caseruntext + 'echo snappyHexMesh running...\n'
caseruntext = caseruntext + 'snappyHexMesh > snappyHexMesh.log\n'
- path2c = os.path.join('2', 'polyMesh')
+ path2c = os.path.join('2', 'polyMesh') # noqa: PTH118
caseruntext = caseruntext + 'cp -r ' + path2c + ' constant\n'
caseruntext = caseruntext + 'rm -fr 1 2\n'
@@ -629,11 +629,11 @@ def scripts(self, data, path):
caseruntext = (
caseruntext
+ 'cp cdictforce '
- + os.path.join('system', 'controlDict')
+ + os.path.join('system', 'controlDict') # noqa: PTH118
+ '\n\n'
)
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
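
Note on the SIM115/PTH123 suppressions above: both rules point at the bare open() call. A minimal un-suppressed sketch, assuming the same append semantics as the hunk (the helper name is hypothetical):

from pathlib import Path

def append_to_caserun(caseruntext, script='caserun.sh'):
    """Append generated shell commands to the case-run script."""
    # Path.open() satisfies PTH123; the with-block satisfies SIM115
    # by closing the file even if write() raises.
    with Path(script).open('a') as scriptfile:
        scriptfile.write(caseruntext)
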
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
index 32e260b34..37b8f7f31 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Others.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Others:
-------
gfiletext: Get all the text for the gravity file
- """
+ """ # noqa: D205, D404
#############################################################
def othersheader(self, fileclas, fileloc, fileobjec):
@@ -56,7 +56,7 @@ def othersheader(self, fileclas, fileloc, fileobjec):
Variable
-----------
header: Header for the other-files
- """
+ """ # noqa: D400, D401
header = (
"""/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
@@ -66,7 +66,7 @@ def othersheader(self, fileclas, fileloc, fileobjec):
| | O |
\\*---------------------------------------------------------------------------*/
FoamFile
-{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\t"""
+{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\t""" # noqa: W291
+ fileclas
+ """;\n\tlocation\t"""
+ '"'
@@ -78,7 +78,7 @@ def othersheader(self, fileclas, fileloc, fileobjec):
)
        # Return the header for the other-files
- return header
+ return header # noqa: RET504
#############################################################
def gfiletext(self, data):
@@ -88,7 +88,7 @@ def gfiletext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -101,7 +101,7 @@ def gfiletext(self, data):
hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
gz = -9.81
else:
# Get the gravity from dakota.json file
@@ -109,17 +109,17 @@ def gfiletext(self, data):
hydroutil.extract_element_from_json(data, ['Events', 'Gravity'])
)
# Depending on the inputs, initialize gravity in the right direction
- if int(gravity) == 11:
+ if int(gravity) == 11: # noqa: PLR2004
gx = 9.81
- elif int(gravity) == 12:
+ elif int(gravity) == 12: # noqa: PLR2004
gy = 9.81
- elif int(gravity) == 13:
+ elif int(gravity) == 13: # noqa: PLR2004
gz = 9.81
- elif int(gravity) == 21:
+ elif int(gravity) == 21: # noqa: PLR2004
gx = -9.81
- elif int(gravity) == 22:
+ elif int(gravity) == 22: # noqa: PLR2004
gy = -9.81
- elif int(gravity) == 23:
+ elif int(gravity) == 23: # noqa: PLR2004
gz = -9.81
# Get the header text for the gravity-file
@@ -140,4 +140,4 @@ def gfiletext(self, data):
+ ');\n'
)
- return gfiletext
+ return gfiletext # noqa: RET504
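
The PLR2004 tags in this hunk mark magic-number comparisons against the gravity codes 11-23. A sketch of the named-lookup alternative (the table name and the zero-gravity fallback are assumptions; the code-to-component mapping is copied from the elif chain above):

# Maps the JSON gravity code to (gx, gy, gz); values mirror the branches above.
GRAVITY_COMPONENTS = {
    11: (9.81, 0.0, 0.0),
    12: (0.0, 9.81, 0.0),
    13: (0.0, 0.0, 9.81),
    21: (-9.81, 0.0, 0.0),
    22: (0.0, -9.81, 0.0),
    23: (0.0, 0.0, -9.81),
}

def gravity_vector(gravity_code):
    """Return (gx, gy, gz) for a JSON gravity code; unknown codes fall back to zero gravity."""
    return GRAVITY_COMPONENTS.get(int(gravity_code), (0.0, 0.0, 0.0))
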
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
index a1247d20f..b34770dff 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Prboundary.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,17 +47,17 @@ class of7Prboundary:
-------
Prtext: Get all the text for the p_rgh-file
- """
+ """ # noqa: D205, D404
#############################################################
- def Prtext(self, data, patches):
+ def Prtext(self, data, patches): # noqa: N802
"""Creates the necessary text for pressure bc for openfoam7
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -93,16 +93,16 @@ def Prtext(self, data, patches):
prtext = prtext + '}\n\n'
        # Return the text for pressure BC
- return prtext
+ return prtext # noqa: RET504
#############################################################
- def Prheader(self):
+ def Prheader(self): # noqa: N802
"""Creates the text for the header for pressure file
Variable
-----------
header: Header for the p_rgh-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -112,16 +112,16 @@ def Prheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tvolScalarField;\n\tlocation\t"0";\n\tobject\tp_rgh;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
header = header + 'dimensions\t[1 -1 -2 0 0 0 0];\n\n'
header = header + 'internalField\tuniform\t0;\n\n'
        # Return the header for the p_rgh file
- return header
+ return header # noqa: RET504
#############################################################
- def Prpatchtext(self, data, Prtype, patchname):
+ def Prpatchtext(self, data, Prtype, patchname): # noqa: C901, N802, N803
"""Creates the text the pressure boundary condition
Arguments:
@@ -133,7 +133,7 @@ def Prpatchtext(self, data, Prtype, patchname):
-----------
Prtext: Text for the particular patch
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -144,31 +144,31 @@ def Prpatchtext(self, data, Prtype, patchname):
# inlet = fixedFluxPressure
# wall/outlet = zeroGradient
# Empty = Empty
- Upatch = hydroutil.extract_element_from_json(
+ Upatch = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'VelocityType_' + patchname]
)
if Upatch == [None]:
- Utype = -1
+ Utype = -1 # noqa: N806
else:
- Utype = ', '.join(
+ Utype = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(
data, ['Events', 'VelocityType_' + patchname]
)
)
- if (int(Utype) > 100) and (int(Utype) < 200):
- Prtype2 = '102'
- elif (int(Utype) > 200) and (int(Utype) < 300):
- Prtype2 = '202'
- elif int(Utype) > 300:
- Prtype2 = '201'
+ if (int(Utype) > 100) and (int(Utype) < 200): # noqa: PLR2004
+ Prtype2 = '102' # noqa: N806
+ elif (int(Utype) > 200) and (int(Utype) < 300): # noqa: PLR2004
+ Prtype2 = '202' # noqa: N806
+ elif int(Utype) > 300: # noqa: PLR2004
+ Prtype2 = '201' # noqa: N806
else:
- Prtype2 = '-1'
+ Prtype2 = '-1' # noqa: N806
else:
- Prtype2 = Prtype
+ Prtype2 = Prtype # noqa: N806
# Test for different pressure types
- if int(Prtype2) == 101:
+ if int(Prtype2) == 101: # noqa: PLR2004
# fixedValue
# Get the pressure values
pres = hydroutil.extract_element_from_json(
@@ -184,28 +184,28 @@ def Prpatchtext(self, data, Prtype, patchname):
)
pr = float(presvals)
# Get the text
- Prtext = '\t{\n\t\t'
- Prtext = Prtext + 'type\tfixedValue;\n\t\t'
- Prtext = Prtext + 'value\t' + str(pr) + ';\n'
- Prtext = Prtext + '\t}\n'
- elif int(Prtype2) == 102:
+ Prtext = '\t{\n\t\t' # noqa: N806
+ Prtext = Prtext + 'type\tfixedValue;\n\t\t' # noqa: N806
+ Prtext = Prtext + 'value\t' + str(pr) + ';\n' # noqa: N806
+ Prtext = Prtext + '\t}\n' # noqa: N806
+ elif int(Prtype2) == 102: # noqa: PLR2004
# fixedFluxPressure
- Prtext = '\t{\n\t\t'
- Prtext = Prtext + 'type\tfixedFluxPressure;\n\t\t'
- Prtext = Prtext + 'value\tuniform 0;\n\t}\n'
- elif int(Prtype2) == 201:
+ Prtext = '\t{\n\t\t' # noqa: N806
+ Prtext = Prtext + 'type\tfixedFluxPressure;\n\t\t' # noqa: N806
+ Prtext = Prtext + 'value\tuniform 0;\n\t}\n' # noqa: N806
+ elif int(Prtype2) == 201: # noqa: PLR2004
# Outlet zero gradient
- Prtext = '\t{\n\t\t'
- Prtext = Prtext + 'type\tzeroGradient;\n\t}\n'
- elif int(Prtype2) == 202:
- Prtext = '\t{\n\t\t'
- Prtext = Prtext + 'type\tfixedValue;\n\t\t'
- Prtext = Prtext + 'value\t0;\n'
- Prtext = Prtext + '\t}\n'
+ Prtext = '\t{\n\t\t' # noqa: N806
+ Prtext = Prtext + 'type\tzeroGradient;\n\t}\n' # noqa: N806
+ elif int(Prtype2) == 202: # noqa: PLR2004
+ Prtext = '\t{\n\t\t' # noqa: N806
+ Prtext = Prtext + 'type\tfixedValue;\n\t\t' # noqa: N806
+ Prtext = Prtext + 'value\t0;\n' # noqa: N806
+ Prtext = Prtext + '\t}\n' # noqa: N806
else:
# Default: Empty
- Prtext = '\t{\n\t\t'
- Prtext = Prtext + 'type\tempty;\n\t}\n'
+ Prtext = '\t{\n\t\t' # noqa: N806
+ Prtext = Prtext + 'type\tempty;\n\t}\n' # noqa: N806
        # Return the text for the particular patch
return Prtext
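
Each N806 tag above exists because Prtext is rebuilt line by line in CapWords. A lowercase builder is one way to retire the whole block of suppressions; a sketch (the helper and variable names are hypothetical, the strings match the fixedFluxPressure branch above):

def _bc_block(*lines):
    """Join boundary-condition entries into the tab/newline layout used by these files."""
    return '\t{\n\t\t' + ';\n\t\t'.join(lines) + ';\n\t}\n'

# The Prtype2 == '102' (fixedFluxPressure) branch from the hunk above:
pr_text = _bc_block('type\tfixedFluxPressure', 'value\tuniform 0')
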
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
index 7d7785e5f..b1979173d 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Process.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -51,7 +51,7 @@ class of7Process:
-------
pprocesstext: Get all the text for the post-processing
- """
+ """ # noqa: D205, D404
#############################################################
def pprocesstext(self, data, path):
@@ -61,7 +61,7 @@ def pprocesstext(self, data, path):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
solver = of7Solve()
@@ -70,7 +70,7 @@ def pprocesstext(self, data, path):
pprocessfile = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'PProcessFile'])
)
- pprocesspath = os.path.join(path, pprocessfile)
+ pprocesspath = os.path.join(path, pprocessfile) # noqa: PTH118
pp_data = np.genfromtxt(pprocesspath, delimiter=',')
num_points = np.shape(pp_data)[0]
ptext = '\t\t(\n'
@@ -89,27 +89,27 @@ def pprocesstext(self, data, path):
# Fields required
value = 0
- pprocessV = hydroutil.extract_element_from_json(
+ pprocessV = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'PPVelocity']
)
if pprocessV != [None]:
- pprocessV = ', '.join(
+ pprocessV = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity'])
)
if pprocessV == 'Yes':
value += 1
- pprocessP = hydroutil.extract_element_from_json(
+ pprocessP = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'PPPressure']
)
if pprocessP != [None]:
- pprocessP = ', '.join(
+ pprocessP = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPPressure'])
)
if pprocessP == 'Yes':
value += 2
if value == 1:
fieldtext = '(U)'
- elif value == 2:
+ elif value == 2: # noqa: PLR2004
fieldtext = '(p_rgh)'
else:
fieldtext = '(U p_rgh)'
@@ -132,17 +132,17 @@ def pprocesstext(self, data, path):
sampletext = sampletext + ');\n\n'
sampletext = sampletext + 'fields\t' + fieldtext + ';\n'
- return sampletext
+ return sampletext # noqa: RET504
#############################################################
- def pprocesscdict(self, data, path):
+ def pprocesscdict(self, data, path): # noqa: C901
"""Creates the necessary files for new controldict for post-processing for openfoam7
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
solver = of7Solve()
@@ -154,7 +154,7 @@ def pprocesscdict(self, data, path):
simtype = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
cdicttext = cdicttext + '\napplication \t olaDyMFlow;\n\n'
else:
cdicttext = cdicttext + '\napplication \t olaFlow;\n\n'
@@ -167,21 +167,21 @@ def pprocesscdict(self, data, path):
cdicttext = cdicttext + 'startFrom \t latestTime;\n\n'
elif restart == 'No':
# Start time
- startT = ', '.join(
+ startT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'StartTime'])
)
cdicttext = cdicttext + 'startFrom \t startTime;\n\n'
cdicttext = cdicttext + 'startTime \t' + startT + ';\n\n'
# End time
- endT = ', '.join(
+ endT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'EndTime'])
)
cdicttext = cdicttext + 'stopAt \t endTime;\n\n'
cdicttext = cdicttext + 'endTime \t' + endT + ';\n\n'
# Time interval
- deltaT = ', '.join(
+ deltaT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'TimeInterval'])
)
cdicttext = cdicttext + 'deltaT \t' + deltaT + ';\n\n'
@@ -190,7 +190,7 @@ def pprocesscdict(self, data, path):
cdicttext = cdicttext + 'writeControl \t adjustableRunTime;\n\n'
# Write interval
- writeT = ', '.join(
+ writeT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'WriteInterval'])
)
cdicttext = cdicttext + 'writeInterval \t' + writeT + ';\n\n'
@@ -212,7 +212,7 @@ def pprocesscdict(self, data, path):
pprocessfile = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'PProcessFile'])
)
- pprocesspath = os.path.join(path, pprocessfile)
+ pprocesspath = os.path.join(path, pprocessfile) # noqa: PTH118
pp_data = np.genfromtxt(pprocesspath, delimiter=',')
num_points = np.shape(pp_data)[0]
ptext = '\t\t\t\t(\n'
@@ -231,27 +231,27 @@ def pprocesscdict(self, data, path):
# Fields required
value = 0
- pprocessV = hydroutil.extract_element_from_json(
+ pprocessV = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'PPVelocity']
)
if pprocessV != [None]:
- pprocessV = ', '.join(
+ pprocessV = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity'])
)
if pprocessV == 'Yes':
value += 1
- pprocessP = hydroutil.extract_element_from_json(
+ pprocessP = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'PPPressure']
)
if pprocessP != [None]:
- pprocessP = ', '.join(
+ pprocessP = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPPressure'])
)
if pprocessP == 'Yes':
value += 2
if value == 1:
fieldtext = '(U)'
- elif value == 2:
+ elif value == 2: # noqa: PLR2004
fieldtext = '(p_rgh)'
else:
fieldtext = '(U p_rgh)'
@@ -275,17 +275,17 @@ def pprocesscdict(self, data, path):
cdicttext = cdicttext + '\t\tfields\t' + fieldtext + ';\n'
cdicttext = cdicttext + '\t}\n}'
- return cdicttext
+ return cdicttext # noqa: RET504
#############################################################
- def scripts(self, data, path):
+ def scripts(self, data, path): # noqa: ARG002
"""Creates the necessary postprocessing in scripts
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -294,7 +294,7 @@ def scripts(self, data, path):
)
if pprocess == [None]:
return 0
- else:
+ else: # noqa: RET505
pprocess = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'Postprocessing']
@@ -307,13 +307,13 @@ def scripts(self, data, path):
# Reconstruct case
caseruntext = caseruntext + 'reconstructPar > reconstruct.log \n'
# Move new controlDict
- cdictpppath = os.path.join('system', 'controlDict')
+ cdictpppath = os.path.join('system', 'controlDict') # noqa: PTH118
caseruntext = caseruntext + 'cp cdictpp ' + cdictpppath + '\n'
# Move the wavemakerfile (if exists)
- if os.path.exists(os.path.join('constant', 'wavemakerMovement.txt')):
+ if os.path.exists(os.path.join('constant', 'wavemakerMovement.txt')): # noqa: PTH110, PTH118
caseruntext = caseruntext + 'mkdir extras\n'
- wavepath = os.path.join('constant', 'wavemakerMovement.txt')
- wavepathnew = os.path.join('extras', 'wavemakerMovement.txt')
+ wavepath = os.path.join('constant', 'wavemakerMovement.txt') # noqa: PTH118
+ wavepathnew = os.path.join('extras', 'wavemakerMovement.txt') # noqa: PTH118
caseruntext = (
caseruntext + 'mv ' + wavepath + ' ' + wavepathnew + '\n'
)
@@ -321,16 +321,16 @@ def scripts(self, data, path):
caseruntext = (
caseruntext
+ 'cp sample '
- + os.path.join('system', 'sample')
+ + os.path.join('system', 'sample') # noqa: PTH118
+ '\n'
)
# Start the postprocessing
caseruntext = caseruntext + 'postProcess -func sample \n\n'
# Write to caserun file
- scriptfile = open('caserun.sh', 'a')
+ scriptfile = open('caserun.sh', 'a') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
- scriptfile.close()
+ scriptfile.close() # noqa: RET503
#############################################################
def pprocesscheck(self, data, path):
@@ -340,7 +340,7 @@ def pprocesscheck(self, data, path):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -351,11 +351,11 @@ def pprocesscheck(self, data, path):
if pprocess == 'No':
return 0
- else:
- pprocessV = ', '.join(
+ else: # noqa: RET505
+ pprocessV = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPVelocity'])
)
- pprocessP = ', '.join(
+ pprocessP = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'PPPressure'])
)
if pprocessV == 'Yes' or pprocessP == 'Yes':
@@ -364,13 +364,13 @@ def pprocesscheck(self, data, path):
)
if pprocessfile == [None]:
return -1
- else:
+ else: # noqa: RET505
pprocessfile = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'PProcessFile']
)
)
- if not os.path.exists(os.path.join(path, pprocessfile)):
+ if not os.path.exists(os.path.join(path, pprocessfile)): # noqa: PTH110, PTH118
return -1
else:
return 0
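
RET505 fires whenever an else follows a branch that returns. Flattened into guard clauses, pprocesscheck would not need the suppressions; a simplified sketch (argument names are assumptions, the -1/0 return codes mirror the hunk above):

def pprocess_status(pprocess, wants_velocity, wants_pressure, pprocessfile_exists):
    """Return 0 when post-processing can proceed or is skipped, -1 on missing inputs."""
    if pprocess == 'No':
        return 0
    if not (wants_velocity or wants_pressure):
        return 0
    if not pprocessfile_exists:
        return -1  # required sample-point file is missing
    return 0
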
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
index d61e97537..c432d0bf4 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7PtDboundary.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7PtDboundary:
-------
PDtext: Get all the text for the pointDisplacement-file
- """
+ """ # noqa: D205, D404
# #############################################################
# def PtDtext(self,data,fipath,patches):
@@ -81,7 +81,7 @@ class of7PtDboundary:
# print(patchname)
#############################################################
- def PtDcheck(self, data, patches):
+ def PtDcheck(self, data, patches): # noqa: N802
"""Checks if a point displacement for openfoam7 is required
Arguments:
@@ -89,12 +89,12 @@ def PtDcheck(self, data, patches):
data: all the JSON data
patches: List of boundary patches
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Number of moving walls
- numMovWall = 0
+ numMovWall = 0 # noqa: N806
# Loop over all patches
for patchname in patches:
@@ -103,27 +103,27 @@ def PtDcheck(self, data, patches):
data, ['Events', 'VelocityType_' + patchname]
)
if patch == [None]:
- Utype = -1
+ Utype = -1 # noqa: N806
else:
- Utype = ', '.join(
+ Utype = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(
data, ['Events', 'VelocityType_' + patchname]
)
)
# If any moving walls (103 - 104)
- if (int(Utype) == 103) or (int(Utype) == 104):
- numMovWall += 1
+ if (int(Utype) == 103) or (int(Utype) == 104): # noqa: PLR2004
+ numMovWall += 1 # noqa: N806
if numMovWall > 0:
return 1
if numMovWall == 0:
return 0
- else:
+ else: # noqa: RET505
return 1
#############################################################
- def PtDtext(self, data, fipath, patches):
+ def PtDtext(self, data, fipath, patches): # noqa: N802
"""Create text for point displacement for openfoam7
Arguments:
@@ -131,7 +131,7 @@ def PtDtext(self, data, fipath, patches):
data: all the JSON data
patches: List of boundary patches
- """
+ """ # noqa: D400
# Create a utilities object
hydroutil = hydroUtils()
@@ -149,9 +149,9 @@ def PtDtext(self, data, fipath, patches):
data, ['Events', 'VelocityType_' + patchname]
)
if patch == [None]:
- Utype = -1
+ Utype = -1 # noqa: N806
else:
- Utype = ', '.join(
+ Utype = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(
data, ['Events', 'VelocityType_' + patchname]
)
@@ -169,16 +169,16 @@ def PtDtext(self, data, fipath, patches):
ptdtext = ptdtext + '}\n\n'
# Return the text for pointDisplacement
- return ptdtext
+ return ptdtext # noqa: RET504
#############################################################
- def PtDheader(self):
+ def PtDheader(self): # noqa: N802
"""Creates the text for the header
Variable
-----------
header: Header for the pointDisplacement-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -188,16 +188,16 @@ def PtDheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tpointVectorField;\n\tlocation\t"0.01";\n\tobject\tpointDisplacement;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
header = header + 'dimensions\t[0 1 0 0 0 0 0];\n\n'
header = header + 'internalField\tuniform (0 0 0);\n\n'
        # Return the header for the pointDisplacement file
- return header
+ return header # noqa: RET504
#############################################################
- def PtDpatchtext(self, data, Utype, patchname, fipath):
+ def PtDpatchtext(self, data, Utype, patchname, fipath): # noqa: ARG002, N802, N803
"""Creates the text the pointDisplacement boundary condition
Arguments:
@@ -211,42 +211,42 @@ def PtDpatchtext(self, data, Utype, patchname, fipath):
-----------
PtDtext: Text for the particular patch
- """
+ """ # noqa: D400, D401
# Get the normal of the patch
normal = self.getNormal(patchname)
# For each patch / type provide the text
# Moving walls
- if (int(Utype) == 103) or (int(Utype) == 104):
- PtDtext = '\t{\n\t\t'
- PtDtext = PtDtext + 'type\twavemakerMovement;\n\t\t'
- PtDtext = PtDtext + 'wavemakerDictName\twavemakerMovementDict;\n\t\t'
- PtDtext = PtDtext + 'value\tuniform (0 0 0);\n'
- PtDtext = PtDtext + '\t}\n'
-
- elif int(Utype) > 300:
- PtDtext = '\t{\n\t\t'
- PtDtext = PtDtext + 'type\tfixedNormalSlip;\n\t\t'
- PtDtext = PtDtext + 'n\t(' + normal + ');\n\t\t'
- PtDtext = PtDtext + 'value\tuniform (0 0 0);\n'
- PtDtext = PtDtext + '\t}\n'
-
- elif (int(Utype) > 200) and (int(Utype) < 300):
- PtDtext = '\t{\n\t\t'
- PtDtext = PtDtext + 'type\tfixedValue;\n\t\t'
- PtDtext = PtDtext + 'value\tuniform (0 0 0);\n'
- PtDtext = PtDtext + '\t}\n'
+ if (int(Utype) == 103) or (int(Utype) == 104): # noqa: PLR2004
+ PtDtext = '\t{\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'type\twavemakerMovement;\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'wavemakerDictName\twavemakerMovementDict;\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806
+ PtDtext = PtDtext + '\t}\n' # noqa: N806
+
+ elif int(Utype) > 300: # noqa: PLR2004
+ PtDtext = '\t{\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'type\tfixedNormalSlip;\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'n\t(' + normal + ');\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806
+ PtDtext = PtDtext + '\t}\n' # noqa: N806
+
+ elif (int(Utype) > 200) and (int(Utype) < 300): # noqa: PLR2004
+ PtDtext = '\t{\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'type\tfixedValue;\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806
+ PtDtext = PtDtext + '\t}\n' # noqa: N806
else:
- PtDtext = '\t{\n\t\t'
- PtDtext = PtDtext + 'type\tfixedValue;\n\t\t'
- PtDtext = PtDtext + 'value\tuniform (0 0 0);\n'
- PtDtext = PtDtext + '\t}\n'
+ PtDtext = '\t{\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'type\tfixedValue;\n\t\t' # noqa: N806
+ PtDtext = PtDtext + 'value\tuniform (0 0 0);\n' # noqa: N806
+ PtDtext = PtDtext + '\t}\n' # noqa: N806
return PtDtext
#############################################################
- def getNormal(self, patchname):
+ def getNormal(self, patchname): # noqa: N802
"""Get the normal to the patch
Arguments:
@@ -257,14 +257,14 @@ def getNormal(self, patchname):
-----------
normal: Normal to the patch
- """
- if (patchname == 'Entry') or (patchname == 'Exit'):
+ """ # noqa: D400
+ if (patchname == 'Entry') or (patchname == 'Exit'): # noqa: PLR1714
normal = '1 0 0'
- elif (patchname == 'Left') or (patchname == 'Right'):
+ elif (patchname == 'Left') or (patchname == 'Right'): # noqa: PLR1714
normal = '0 1 0'
- elif (patchname == 'Bottom') or (patchname == 'Top'):
+ elif (patchname == 'Bottom') or (patchname == 'Top'): # noqa: PLR1714
normal = '0 0 1'
- elif (patchname == 'Building') or (patchname == 'OtherBuilding'):
+ elif (patchname == 'Building') or (patchname == 'OtherBuilding'): # noqa: PLR1714
normal = '1 0 0'
return normal
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
index 2315f5c2e..de0fe5fa5 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Solve.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Solve:
-------
fvSchemetext: Get all the text for the fvSchemes
- """
+ """ # noqa: D205, D404
#############################################################
def solverheader(self, fileobjec):
@@ -56,7 +56,7 @@ def solverheader(self, fileobjec):
Variable
-----------
header: Header for the solver-files
- """
+ """ # noqa: D400, D401
header = (
"""/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
@@ -66,114 +66,114 @@ def solverheader(self, fileobjec):
| | O |
\\*---------------------------------------------------------------------------*/
FoamFile
-{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\t"""
+{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"system";\n\tobject\t""" # noqa: W291
+ fileobjec
+ """;\n}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
)
        # Return the header for the solver file
- return header
+ return header # noqa: RET504
#############################################################
- def fvSchemetext(self, data):
+ def fvSchemetext(self, data): # noqa: ARG002, N802
"""Creates the necessary text for fvSchemes for openfoam7
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Get the header text for the U-file
- fvSchemetext = self.solverheader('fvSchemes')
+ fvSchemetext = self.solverheader('fvSchemes') # noqa: N806
# Add all other items
# ddt
- fvSchemetext = fvSchemetext + 'ddtSchemes\n{\n\tdefault\tEuler;\n}\n\n'
+ fvSchemetext = fvSchemetext + 'ddtSchemes\n{\n\tdefault\tEuler;\n}\n\n' # noqa: N806
# grad
- fvSchemetext = fvSchemetext + 'gradSchemes\n{\n\tdefault\tGauss linear;\n}\n'
+ fvSchemetext = fvSchemetext + 'gradSchemes\n{\n\tdefault\tGauss linear;\n}\n' # noqa: N806
# div
- fvSchemetext = fvSchemetext + '\ndivSchemes\n{\n\t'
- fvSchemetext = fvSchemetext + 'div(rhoPhi,U)\tGauss limitedLinearV 1;\n\t'
- fvSchemetext = fvSchemetext + 'div(U)\tGauss linear;\n\t'
- fvSchemetext = (
+ fvSchemetext = fvSchemetext + '\ndivSchemes\n{\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'div(rhoPhi,U)\tGauss limitedLinearV 1;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'div(U)\tGauss linear;\n\t' # noqa: N806
+ fvSchemetext = ( # noqa: N806
fvSchemetext
+ 'div((rhoPhi|interpolate(porosity)),U)\tGauss limitedLinearV 1;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div(rhoPhiPor,UPor)\tGauss limitedLinearV 1;\n\t'
)
- fvSchemetext = fvSchemetext + 'div(rhoPhi,UPor)\tGauss limitedLinearV 1;\n\t'
- fvSchemetext = fvSchemetext + 'div(rhoPhiPor,U)\tGauss limitedLinearV 1;\n\t'
- fvSchemetext = fvSchemetext + 'div(phi,alpha)\tGauss vanLeer;\n\t'
- fvSchemetext = (
+ fvSchemetext = fvSchemetext + 'div(rhoPhi,UPor)\tGauss limitedLinearV 1;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'div(rhoPhiPor,U)\tGauss limitedLinearV 1;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'div(phi,alpha)\tGauss vanLeer;\n\t' # noqa: N806
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div(phirb,alpha)\tGauss interfaceCompression;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div((muEff*dev(T(grad(U)))))\tGauss linear;\n\t'
)
- fvSchemetext = fvSchemetext + 'div(phi,k)\tGauss upwind;\n\t'
- fvSchemetext = fvSchemetext + 'div(phi,epsilon)\tGauss upwind;\n\t'
- fvSchemetext = (
+ fvSchemetext = fvSchemetext + 'div(phi,k)\tGauss upwind;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'div(phi,epsilon)\tGauss upwind;\n\t' # noqa: N806
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div((phi|interpolate(porosity)),k)\tGauss upwind;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div((phi*interpolate(rho)),k)\tGauss upwind;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext
+ 'div((phi|interpolate(porosity)),epsilon)\tGauss upwind;\n\t'
)
- fvSchemetext = fvSchemetext + 'div(phi,omega)\tGauss upwind;\n\t'
- fvSchemetext = (
+ fvSchemetext = fvSchemetext + 'div(phi,omega)\tGauss upwind;\n\t' # noqa: N806
+ fvSchemetext = ( # noqa: N806
fvSchemetext
+ 'div((phi|interpolate(porosity)),omega)\tGauss upwind;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div((phi*interpolate(rho)),omega)\tGauss upwind;\n\t'
)
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'div((phi*interpolate(rho)),epsilon)\tGauss upwind;\n'
)
- fvSchemetext = fvSchemetext + '}\n\n'
+ fvSchemetext = fvSchemetext + '}\n\n' # noqa: N806
# Laplacian
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext
+ 'laplacianSchemes\n{\n\tdefault\tGauss linear corrected;\n}\n\n'
)
# interpolation
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'interpolationSchemes\n{\n\tdefault\tlinear;\n}\n\n'
)
# snGrad
- fvSchemetext = (
+ fvSchemetext = ( # noqa: N806
fvSchemetext + 'snGradSchemes\n{\n\tdefault\tcorrected;\n}\n\n'
)
# flux
- fvSchemetext = fvSchemetext + 'fluxRequired\n{\n\t'
- fvSchemetext = fvSchemetext + 'default\tno;\n\t'
- fvSchemetext = fvSchemetext + 'p_rgh;\n\t'
- fvSchemetext = fvSchemetext + 'pcorr;\n\t'
- fvSchemetext = fvSchemetext + 'alpha.water;\n'
- fvSchemetext = fvSchemetext + '}\n'
+ fvSchemetext = fvSchemetext + 'fluxRequired\n{\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'default\tno;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'p_rgh;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'pcorr;\n\t' # noqa: N806
+ fvSchemetext = fvSchemetext + 'alpha.water;\n' # noqa: N806
+ fvSchemetext = fvSchemetext + '}\n' # noqa: N806
- return fvSchemetext
+ return fvSchemetext # noqa: RET504
#############################################################
- def fvSolntext(self, data):
+ def fvSolntext(self, data): # noqa: N802
"""Creates the necessary text for fvSolution for openfoam7
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -188,99 +188,99 @@ def fvSolntext(self, data):
)
# Get the header text for the U-file
- fvSolntext = self.solverheader('fvSolution')
+ fvSolntext = self.solverheader('fvSolution') # noqa: N806
# Other data
- fvSolntext = fvSolntext + 'solvers\n{\n\t'
+ fvSolntext = fvSolntext + 'solvers\n{\n\t' # noqa: N806
# solvers: alpha
- fvSolntext = fvSolntext + '"alpha.water.*"\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'nAlphaCorr\t1;\n\t\t'
- fvSolntext = fvSolntext + 'nAlphaSubCycles\t2;\n\t\t'
- fvSolntext = fvSolntext + 'alphaOuterCorrectors\tyes;\n\t\t'
- fvSolntext = fvSolntext + 'cAlpha\t1;\n\t\t'
- fvSolntext = fvSolntext + 'MULESCorr\tno;\n\t\t'
- fvSolntext = fvSolntext + 'nLimiterIter\t3;\n\t\t'
- fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t'
- fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-08;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + '"alpha.water.*"\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nAlphaCorr\t1;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nAlphaSubCycles\t2;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'alphaOuterCorrectors\tyes;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'cAlpha\t1;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'MULESCorr\tno;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nLimiterIter\t3;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-08;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t' # noqa: N806
# solvers: pcorr
- fvSolntext = fvSolntext + '"pcorr.*"\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'solver\tPCG;\n\t\t'
- fvSolntext = fvSolntext + 'preconditioner\tDIC;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-05;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + '"pcorr.*"\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tPCG;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'preconditioner\tDIC;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-05;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t' # noqa: N806
# solvers: pcorrFinal
- fvSolntext = fvSolntext + 'pcorrFinal\n\t{\n\t\t'
- fvSolntext = fvSolntext + '$pcorr;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + 'pcorrFinal\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + '$pcorr;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t' # noqa: N806
# solvers: p_rgh
- fvSolntext = fvSolntext + 'p_rgh\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'solver\tPCG;\n\t\t'
- fvSolntext = fvSolntext + 'preconditioner\tDIC;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-07;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0.05;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + 'p_rgh\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tPCG;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'preconditioner\tDIC;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-07;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0.05;\n\t}\n\n\t' # noqa: N806
# solvers: p_rghFinal
- fvSolntext = fvSolntext + 'p_rghFinal\n\t{\n\t\t'
- fvSolntext = fvSolntext + '$p_rgh;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + 'p_rghFinal\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + '$p_rgh;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n\n\t' # noqa: N806
# solvers: U
- fvSolntext = fvSolntext + 'U\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t'
- fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-06;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n'
+ fvSolntext = fvSolntext + 'U\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-06;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n' # noqa: N806
# Turbulece variables (if exist)
- if (int(turb) == 1) or (int(turb) == 2):
- fvSolntext = fvSolntext + '\n\t'
- fvSolntext = fvSolntext + '"(k|epsilon|omega|B|nuTilda).*"\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t'
- fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-08;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n'
+ if (int(turb) == 1) or (int(turb) == 2): # noqa: PLR2004
+ fvSolntext = fvSolntext + '\n\t' # noqa: N806
+ fvSolntext = fvSolntext + '"(k|epsilon|omega|B|nuTilda).*"\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tsmoothSolver;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'smoother\tsymGaussSeidel;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-08;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n' # noqa: N806
# solvers: cellDisplacement (for flume)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
# solvers: cellDisplacement (for flume)
- fvSolntext = fvSolntext + '\n\t'
- fvSolntext = fvSolntext + 'cellDisplacement\n\t{\n\t\t'
- fvSolntext = fvSolntext + 'solver\tGAMG;\n\t\t'
- fvSolntext = fvSolntext + 'tolerance\t1e-05;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t\t'
- fvSolntext = fvSolntext + 'smoother\tGaussSeidel;\n\t\t'
- fvSolntext = fvSolntext + 'cacheAgglomeration\tfalse;\n\t\t'
- fvSolntext = fvSolntext + 'nCellsInCoarsestLevel\t10;\n\t\t'
- fvSolntext = fvSolntext + 'agglomerator\tfaceAreaPair;\n\t\t'
- fvSolntext = fvSolntext + 'mergeLevels\t1;\n\t}\n\n\t'
+ fvSolntext = fvSolntext + '\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'cellDisplacement\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'solver\tGAMG;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'tolerance\t1e-05;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'smoother\tGaussSeidel;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'cacheAgglomeration\tfalse;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nCellsInCoarsestLevel\t10;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'agglomerator\tfaceAreaPair;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'mergeLevels\t1;\n\t}\n\n\t' # noqa: N806
# solvers: cellDisplacementFinal(for flume)
- fvSolntext = fvSolntext + 'cellDisplacementFinal\n\t{\n\t\t'
- fvSolntext = fvSolntext + '$cellDisplacement;\n\t\t'
- fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n'
+ fvSolntext = fvSolntext + 'cellDisplacementFinal\n\t{\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + '$cellDisplacement;\n\t\t' # noqa: N806
+ fvSolntext = fvSolntext + 'relTol\t0;\n\t}\n' # noqa: N806
# Close solvers
- fvSolntext = fvSolntext + '}\n\n'
+ fvSolntext = fvSolntext + '}\n\n' # noqa: N806
# PIMPLE
- fvSolntext = fvSolntext + 'PIMPLE\n{\n\t'
- fvSolntext = fvSolntext + 'momentumPredictor\tno;\n\t'
- fvSolntext = fvSolntext + 'nOuterCorrectors\t1;\n\t'
- fvSolntext = fvSolntext + 'nCorrectors\t3;\n\t'
- fvSolntext = fvSolntext + 'nNonOrthogonalCorrectors\t0;\n}\n\n'
+ fvSolntext = fvSolntext + 'PIMPLE\n{\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'momentumPredictor\tno;\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nOuterCorrectors\t1;\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nCorrectors\t3;\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'nNonOrthogonalCorrectors\t0;\n}\n\n' # noqa: N806
# Relaxation factors
- fvSolntext = fvSolntext + 'relaxationFactors\n{\n\t'
- fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t'
- fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}'
+ fvSolntext = fvSolntext + 'relaxationFactors\n{\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'fields\n\t{\n\t}\n\t' # noqa: N806
+ fvSolntext = fvSolntext + 'equations\n\t{\n\t\t".*"\t1;\n\t}\n}' # noqa: N806
- return fvSolntext
+ return fvSolntext # noqa: RET504
#############################################################
def cdicttext(self, data):
@@ -290,7 +290,7 @@ def cdicttext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -301,7 +301,7 @@ def cdicttext(self, data):
simtype = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
cdicttext = cdicttext + '\napplication \t olaDyMFlow;\n\n'
else:
cdicttext = cdicttext + '\napplication \t olaFlow;\n\n'
@@ -314,14 +314,14 @@ def cdicttext(self, data):
cdicttext = cdicttext + 'startFrom \t latestTime;\n\n'
elif restart == 'No':
# Start time
- startT = ', '.join(
+ startT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'StartTime'])
)
cdicttext = cdicttext + 'startFrom \t startTime;\n\n'
cdicttext = cdicttext + 'startTime \t' + startT + ';\n\n'
# End time
- endT = ', '.join(
+ endT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'EndTime'])
)
cdicttext = cdicttext + 'stopAt \t endTime;\n\n'
@@ -349,7 +349,7 @@ def cdicttext(self, data):
cdicttext = cdicttext + 'maxAlphaCo \t 1.0;\n\n'
cdicttext = cdicttext + 'maxDeltaT \t 1;\n\n'
- return cdicttext
+ return cdicttext # noqa: RET504
#############################################################
def cdictcheck(self, data):
@@ -359,29 +359,29 @@ def cdictcheck(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Start time
- startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime'])
+ startT = hydroutil.extract_element_from_json(data, ['Events', 'StartTime']) # noqa: N806
if startT == [None]:
return -1
# End time
- endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime'])
+ endT = hydroutil.extract_element_from_json(data, ['Events', 'EndTime']) # noqa: N806
if endT == [None]:
return -1
# deltaT
- deltaT = hydroutil.extract_element_from_json(
+ deltaT = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'TimeInterval']
)
if deltaT == [None]:
return -1
# WriteT
- writeT = hydroutil.extract_element_from_json(
+ writeT = hydroutil.extract_element_from_json( # noqa: N806
data, ['Events', 'WriteInterval']
)
if writeT == [None]:
@@ -391,7 +391,7 @@ def cdictcheck(self, data):
return 0
#############################################################
- def cdictFtext(self, data):
+ def cdictFtext(self, data): # noqa: N802
"""Creates the necessary text for controlDict for openfoam7
This is used for force computation with Dakota
@@ -399,7 +399,7 @@ def cdictFtext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D205, D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -410,7 +410,7 @@ def cdictFtext(self, data):
simtype = ', '.join(
hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
)
- if int(simtype) == 4:
+ if int(simtype) == 4: # noqa: PLR2004
cdicttext = cdicttext + '\napplication \t olaDyMFlow;\n\n'
else:
cdicttext = cdicttext + '\napplication \t olaFlow;\n\n'
@@ -423,21 +423,21 @@ def cdictFtext(self, data):
cdicttext = cdicttext + 'startFrom \t latestTime;\n\n'
elif restart == 'No':
# Start time
- startT = ', '.join(
+ startT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'StartTime'])
)
cdicttext = cdicttext + 'startFrom \t startTime;\n\n'
cdicttext = cdicttext + 'startTime \t' + startT + ';\n\n'
# End time
- endT = ', '.join(
+ endT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'EndTime'])
)
cdicttext = cdicttext + 'stopAt \t endTime;\n\n'
cdicttext = cdicttext + 'endTime \t' + endT + ';\n\n'
# Time interval
- deltaT = ', '.join(
+ deltaT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'TimeInterval'])
)
cdicttext = cdicttext + 'deltaT \t' + deltaT + ';\n\n'
@@ -446,7 +446,7 @@ def cdictFtext(self, data):
cdicttext = cdicttext + 'writeControl \t adjustableRunTime;\n\n'
# Write interval
- writeT = ', '.join(
+ writeT = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(data, ['Events', 'WriteInterval'])
)
cdicttext = cdicttext + 'writeInterval \t' + writeT + ';\n\n'
@@ -489,4 +489,4 @@ def cdictFtext(self, data):
cdicttext = cdicttext + 'direction\t(1 0 0);\n\t\t\t'
cdicttext = cdicttext + 'cumulative\tno;\n\t\t}\n\t}\n}'
- return cdicttext
+ return cdicttext # noqa: RET504
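
Every fvSchemetext/fvSolntext line above carries its own N806 tag because of the accumulate-by-reassignment pattern. Collecting fragments in a list and joining once removes both the tags and the RET504 suppression on the final return; a sketch of the PIMPLE block (the function name is hypothetical, the strings are copied from the hunk):

def pimple_block():
    """Assemble the PIMPLE sub-dictionary with a single join."""
    parts = [
        'PIMPLE\n{\n\t',
        'momentumPredictor\tno;\n\t',
        'nOuterCorrectors\t1;\n\t',
        'nCorrectors\t3;\n\t',
        'nNonOrthogonalCorrectors\t0;\n}\n\n',
    ]
    return ''.join(parts)  # returned directly, so RET504 never fires
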
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
index a3e25ee7c..154e2b209 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Turbulence.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -47,7 +47,7 @@ class of7Turbulence:
-------
     turbtext: Get all the text for the turbulenceProperties
- """
+ """ # noqa: D205, D404
#############################################################
def turbtext(self, data):
@@ -57,7 +57,7 @@ def turbtext(self, data):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -77,7 +77,7 @@ def turbtext(self, data):
turbtext = turbtext + '\tRASModel\tkEpsilon;\n'
turbtext = turbtext + '\tturbulence\ton;\n'
turbtext = turbtext + '\tprintCoeffs\ton;\n}\n'
- elif int(turbmodel) == 2:
+ elif int(turbmodel) == 2: # noqa: PLR2004
turbtext = turbtext + 'simulationType\tRAS;\n\n'
turbtext = turbtext + 'RAS\n{\n'
turbtext = turbtext + '\tRASModel\tkOmegaSST;\n'
@@ -93,7 +93,7 @@ def turbheader(self):
Variable
-----------
header: Header for the turbulence properties-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -103,7 +103,7 @@ def turbheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tdictionary;\n\tlocation\t"constant";\n\tobject\tturbulenceProperties;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
        # Return the header for the turbulenceProperties file
- return header
+ return header # noqa: RET504
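
The turbulence hunk compares turbmodel against bare 1 and 2, hence the PLR2004 tags. Named constants are the usual remedy; a sketch (the constant names are assumptions, the code values come from the hunk above):

K_EPSILON = 1    # JSON code for the k-epsilon RAS model
K_OMEGA_SST = 2  # JSON code for the k-omega SST RAS model

def ras_model_name(turbmodel):
    """Map a JSON turbulence code to an OpenFOAM RASModel name; None means no RAS model."""
    if int(turbmodel) == K_EPSILON:
        return 'kEpsilon'
    if int(turbmodel) == K_OMEGA_SST:
        return 'kOmegaSST'
    return None
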
diff --git a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
index d351f5a21..2686f3c9b 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/of7Uboundary.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -48,10 +48,10 @@ class of7Uboundary:
-------
     Utext: Gets all the text for the U-file
- """
+ """ # noqa: D205, D404
#############################################################
- def Utext(self, data, fipath, patches):
+ def Utext(self, data, fipath, patches): # noqa: N802
"""Creates the necessary folders for openfoam7
Arguments:
@@ -60,12 +60,12 @@ def Utext(self, data, fipath, patches):
patches: List of boundary patches
fipath: Path where the dakota.json file exists
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Number of moving walls
- numMovWall = 0
+ numMovWall = 0 # noqa: N806
# Get the header text for the U-file
utext = self.Uheader()
@@ -80,15 +80,15 @@ def Utext(self, data, fipath, patches):
data, ['Events', 'VelocityType_' + patchname]
)
if patch == [None]:
- Utype = -1
+ Utype = -1 # noqa: N806
else:
- Utype = ', '.join(
+ Utype = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(
data, ['Events', 'VelocityType_' + patchname]
)
)
- if int(Utype) == 103 or int(Utype) == 104:
- numMovWall += 1
+ if int(Utype) == 103 or int(Utype) == 104: # noqa: PLR2004
+ numMovWall += 1 # noqa: N806
utext = utext + self.Upatchtext(
data, Utype, patchname, fipath, numMovWall
)
@@ -105,16 +105,16 @@ def Utext(self, data, fipath, patches):
utext = utext + '}\n\n'
# Return the text for velocity BC
- return utext
+ return utext # noqa: RET504
#############################################################
- def Uheader(self):
+ def Uheader(self): # noqa: N802
"""Creates the text for the header
Variable
-----------
header: Header for the U-file
- """
+ """ # noqa: D400, D401
header = """/*--------------------------*- NHERI SimCenter -*----------------------------*\\
| | H |
| | Y | HydroUQ: Water-based Natural Hazards Modeling Application
@@ -124,16 +124,16 @@ def Uheader(self):
\\*---------------------------------------------------------------------------*/
FoamFile
{\n\tversion\t2.0;\n\tformat\tascii;\n\tclass\tvolVectorField;\n\tlocation\t"0";\n\tobject\tU;\n}
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n"""
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\n\n""" # noqa: W291
header = header + 'dimensions\t[0 1 -1 0 0 0 0];\n\n'
header = header + 'internalField\tuniform (0 0 0);\n\n'
# Return the header for U file
- return header
+ return header # noqa: RET504
#############################################################
- def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
+ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall): # noqa: C901, N802, N803
"""Creates the text the velocity boundary condition
Arguments:
@@ -147,21 +147,21 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
-----------
Utext: Text for the particular patch
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Inlet types
# For each type, get the text
- if int(Utype) == 101:
+ if int(Utype) == 101: # noqa: PLR2004
# SW solutions (1)
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\ttimeVaryingMappedFixedValue;\n\t\t'
- Utext = Utext + 'offset\t(0 0 0);\n\t\t'
- Utext = Utext + 'setAverage\toff;\n'
- Utext = Utext + '\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\ttimeVaryingMappedFixedValue;\n\t\t' # noqa: N806
+ Utext = Utext + 'offset\t(0 0 0);\n\t\t' # noqa: N806
+ Utext = Utext + 'setAverage\toff;\n' # noqa: N806
+ Utext = Utext + '\t}\n' # noqa: N806
- elif int(Utype) == 102:
+ elif int(Utype) == 102: # noqa: PLR2004
# Inlet: constant velocity
# Get the velocity values
velo = hydroutil.extract_element_from_json(
@@ -184,9 +184,9 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
vz = vels[2]
# Get the text
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tfixedValue;\n\t\t'
- Utext = (
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tfixedValue;\n\t\t' # noqa: N806
+ Utext = ( # noqa: N806
Utext
+ 'value\t('
+ str(vx)
@@ -197,11 +197,11 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
+ ');\n\t}\n'
)
- elif int(Utype) == 103:
+ elif int(Utype) == 103: # noqa: PLR2004
# Inlet Moving wall (OSU flume)
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tmovingWallVelocity;\n\t\t'
- Utext = Utext + 'value\tuniform (0 0 0);\n\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tmovingWallVelocity;\n\t\t' # noqa: N806
+ Utext = Utext + 'value\tuniform (0 0 0);\n\t}\n' # noqa: N806
# Create the files required
# Moving wall file
# Get the displacement and waterheight file name
@@ -214,8 +214,8 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
data, ['Events', 'OSUMovingWallDisp_' + patchname]
)
)
- dispfilepath = os.path.join(fipath, dispfilename)
- if os.path.exists(dispfilepath):
+ dispfilepath = os.path.join(fipath, dispfilename) # noqa: PTH118
+ if os.path.exists(dispfilepath): # noqa: PTH110
heightfilename = hydroutil.extract_element_from_json(
data, ['Events', 'OSUMovingWallHeight_' + patchname]
)
@@ -225,8 +225,8 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
data, ['Events', 'OSUMovingWallHeight_' + patchname]
)
)
- heightfilepath = os.path.join(fipath, heightfilename)
- if not os.path.exists(heightfilepath):
+ heightfilepath = os.path.join(fipath, heightfilename) # noqa: PTH118
+ if not os.path.exists(heightfilepath): # noqa: PTH110
heightfilepath = 'None'
else:
heightfilepath = 'None'
@@ -239,11 +239,11 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
# Dynamic mesh dictionary
self.of7dynamicMeshdict(fipath)
- elif int(Utype) == 104:
+ elif int(Utype) == 104: # noqa: PLR2004
# Inlet Moving wall (Gen flume)
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tmovingWallVelocity;\n\t\t'
- Utext = Utext + 'value\tuniform (0 0 0);\n\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tmovingWallVelocity;\n\t\t' # noqa: N806
+ Utext = Utext + 'value\tuniform (0 0 0);\n\t}\n' # noqa: N806
# Create the files required
# Moving wall file
# Get the displacement and waterheight file name
@@ -257,8 +257,8 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
data, ['Events', 'MovingWallDisp_' + patchname]
)
)
- dispfilepath = os.path.join(fipath, dispfilename)
- if os.path.exists(dispfilepath):
+ dispfilepath = os.path.join(fipath, dispfilename) # noqa: PTH118
+ if os.path.exists(dispfilepath): # noqa: PTH110
heightfilename = hydroutil.extract_element_from_json(
data, ['Events', 'MovingWallHeight_' + patchname]
)
@@ -268,8 +268,8 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
data, ['Events', 'MovingWallHeight_' + patchname]
)
)
- heightfilepath = os.path.join(fipath, heightfilename)
- if not os.path.exists(heightfilepath):
+ heightfilepath = os.path.join(fipath, heightfilename) # noqa: PTH118
+ if not os.path.exists(heightfilepath): # noqa: PTH110
heightfilepath = 'None'
else:
heightfilepath = 'None'
@@ -282,12 +282,12 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
# Dynamic mesh dictionary
self.of7dynamicMeshdict(fipath)
- elif int(Utype) == 201:
+ elif int(Utype) == 201: # noqa: PLR2004
# Outlet zero gradient
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tzeroGradient;\n\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tzeroGradient;\n\t}\n' # noqa: N806
- elif int(Utype) == 202:
+ elif int(Utype) == 202: # noqa: PLR2004
# Outlet: inletOutlet
# Get the velocity values
velo = hydroutil.extract_element_from_json(
@@ -310,9 +310,9 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
vz = vels[2]
# Get the text
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tinletOutlet;\n\t\t'
- Utext = (
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tinletOutlet;\n\t\t' # noqa: N806
+ Utext = ( # noqa: N806
Utext
+ 'inletValue\tuniform ('
+ str(vx)
@@ -322,7 +322,7 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
+ str(vz)
+ ');\n\t\t'
)
- Utext = (
+ Utext = ( # noqa: N806
Utext
+ 'value\tuniform ('
+ str(vx)
@@ -332,23 +332,23 @@ def Upatchtext(self, data, Utype, patchname, fipath, numMovWall):
+ str(vz)
+ ');\n'
)
- Utext = Utext + '\t}\n'
+ Utext = Utext + '\t}\n' # noqa: N806
- elif int(Utype) == 301:
+ elif int(Utype) == 301: # noqa: PLR2004
# Wall: noSlip
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tnoSlip;\n\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tnoSlip;\n\t}\n' # noqa: N806
else:
# Default: Empty
- Utext = '\t{\n\t\t'
- Utext = Utext + 'type\tempty;\n\t}\n'
+ Utext = '\t{\n\t\t' # noqa: N806
+ Utext = Utext + 'type\tempty;\n\t}\n' # noqa: N806
# Return the text for the patch
return Utext
#############################################################
- def Uchecks(self, data, fipath, patches):
+ def Uchecks(self, data, fipath, patches): # noqa: C901, N802
"""Creates the data files required for the OSU moving wall
Arguments:
@@ -357,12 +357,12 @@ def Uchecks(self, data, fipath, patches):
fipath: Path to the dakota.json file location
patches: List of patches
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Number of moving walls
- numMovWall = 0
+ numMovWall = 0 # noqa: N806
# Loop over all patches
for patchname in patches:
@@ -371,18 +371,18 @@ def Uchecks(self, data, fipath, patches):
data, ['Events', 'VelocityType_' + patchname]
)
if patch == [None]:
- Utype = -1
+ Utype = -1 # noqa: N806
else:
- Utype = ', '.join(
+ Utype = ', '.join( # noqa: N806
hydroutil.extract_element_from_json(
data, ['Events', 'VelocityType_' + patchname]
)
)
# Checks for different U-types
- if int(Utype) == 103:
+ if int(Utype) == 103: # noqa: PLR2004
# Checking for multiple moving walls
- numMovWall += 1
+ numMovWall += 1 # noqa: N806
if numMovWall > 1:
return -1
@@ -392,19 +392,19 @@ def Uchecks(self, data, fipath, patches):
)
if dispfilename == [None]:
return -1
- else:
+ else: # noqa: RET505
dispfilename = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'OSUMovingWallDisp_' + patchname]
)
)
- pathF = os.path.join(fipath, dispfilename)
- if not os.path.exists(pathF):
+ pathF = os.path.join(fipath, dispfilename) # noqa: PTH118, N806
+ if not os.path.exists(pathF): # noqa: PTH110
return -1
- elif int(Utype) == 104:
+ elif int(Utype) == 104: # noqa: PLR2004
# Checking for multiple moving walls
- numMovWall += 1
+ numMovWall += 1 # noqa: N806
if numMovWall > 1:
return -1
@@ -414,14 +414,14 @@ def Uchecks(self, data, fipath, patches):
)
if dispfilename == [None]:
return -1
- else:
+ else: # noqa: RET505
dispfilename = ', '.join(
hydroutil.extract_element_from_json(
data, ['Events', 'MovingWallDisp_' + patchname]
)
)
- pathF = os.path.join(fipath, dispfilename)
- if not os.path.exists(pathF):
+ pathF = os.path.join(fipath, dispfilename) # noqa: PTH118, N806
+ if not os.path.exists(pathF): # noqa: PTH110
return -1
# If all checks pass
@@ -435,13 +435,13 @@ def of7wavemakerdict(self, fipath):
---------
fipath: Path to the dakota.json file location
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Get the file ID
- filepath = os.path.join(fipath, 'constant', 'wavemakerMovementDict')
- fileID = open(filepath, 'w')
+ filepath = os.path.join(fipath, 'constant', 'wavemakerMovementDict') # noqa: PTH118
+ fileID = open(filepath, 'w') # noqa: SIM115, PTH123, N806
# Header
header = hydroutil.of7header(
'dictionary', 'constant', 'wavemakerMovementDict'
@@ -454,20 +454,20 @@ def of7wavemakerdict(self, fipath):
fileID.close()
#############################################################
- def of7dynamicMeshdict(self, fipath):
+ def of7dynamicMeshdict(self, fipath): # noqa: N802
"""Creates the dynamic mesh dictionary for the moving wall
Arguments:
---------
fipath: Path to the dakota.json file location
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
# Get the file ID
- filepath = os.path.join(fipath, 'constant', 'dynamicMeshDict')
- fileID = open(filepath, 'w')
+ filepath = os.path.join(fipath, 'constant', 'dynamicMeshDict') # noqa: PTH118
+ fileID = open(filepath, 'w') # noqa: SIM115, PTH123, N806
# Header
header = hydroutil.of7header('dictionary', 'constant', 'dynamicMeshDict')
fileID.write(header)
@@ -480,17 +480,17 @@ def of7dynamicMeshdict(self, fipath):
fileID.close()
#############################################################
- def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
+ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall): # noqa: ARG002, C901, N802, N803
"""Creates the wavemaker text file for the OSU moving wall
Arguments:
---------
fipath: Path to the dakota.json file location
- """
+ """ # noqa: D400, D401
# Get the file ID
- filepath = os.path.join(fipath, 'constant', 'wavemakerMovement.txt')
- fileID = open(filepath, 'w')
+ filepath = os.path.join(fipath, 'constant', 'wavemakerMovement.txt') # noqa: PTH118
+ fileID = open(filepath, 'w') # noqa: SIM115, PTH123, N806
# Start writing the file
fileID.write('wavemakerType\tPiston;\n')
@@ -501,12 +501,12 @@ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
# Get the frequency of the wavemaker
frequency = 0
waterdepth = 0
- filewm = open(dispfilepath)
- Lines = filewm.readlines()
+ filewm = open(dispfilepath) # noqa: SIM115, PTH123
+ Lines = filewm.readlines() # noqa: N806
count = 0
for line in Lines:
- count += 1
- if count == 37:
+ count += 1 # noqa: SIM113
+ if count == 37: # noqa: PLR2004
stra = line.replace('% SampleRate: ', '')
stra2 = stra.replace(' Hz', '')
frequency = 1 / float(stra2)
@@ -514,14 +514,14 @@ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
count = 0
for line in Lines:
count += 1
- if count == 61:
+ if count == 61: # noqa: PLR2004
stra = line.replace('% StillWaterDepth: ', '')
waterdepth = float(stra)
break
# Count the number of lines
countlines = 0
- with open(dispfilepath) as fdisp:
+ with open(dispfilepath) as fdisp: # noqa: PTH123
for line2 in fdisp:
if line2.strip():
countlines += 1
@@ -530,7 +530,7 @@ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
# Create the timeseries
time = 0
fileID.write('timeSeries\t' + str(countlines) + '(\n')
- for ii in range(countlines):
+ for ii in range(countlines): # noqa: B007
fileID.write(str(time) + '\n')
time = time + frequency
fileID.write(');\n\n')
@@ -540,7 +540,7 @@ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
count = 0
for line in Lines:
count += 1
- if count > 72:
+ if count > 72: # noqa: PLR2004
if line != '\n':
data = float(line)
fileID.write(str(data) + '\n')
@@ -550,29 +550,29 @@ def OSUwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
if heightfilepath != 'None':
# Write the paddle Eta
fileID.write('paddleEta 1(\n' + str(countlines) + '(\n')
- filewmg = open(heightfilepath)
- Lines2 = filewmg.readlines()
+ filewmg = open(heightfilepath) # noqa: SIM115, PTH123
+ Lines2 = filewmg.readlines() # noqa: N806
count = 0
for line in Lines2:
count += 1
- if count > 72:
+ if count > 72: # noqa: PLR2004
if line != '\n':
data = float(line) + waterdepth
fileID.write(str(data) + '\n')
fileID.write(')\n);')
#############################################################
- def GenwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
+ def GenwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall): # noqa: ARG002, C901, N802, N803
"""Creates the wavemaker text file for a general moving wall
Arguments:
---------
fipath: Path to the dakota.json file location
- """
+ """ # noqa: D400, D401
# Get the file ID
- filepath = os.path.join(fipath, 'constant', 'wavemakerMovement.txt')
- fileID = open(filepath, 'w')
+ filepath = os.path.join(fipath, 'constant', 'wavemakerMovement.txt') # noqa: PTH118
+ fileID = open(filepath, 'w') # noqa: SIM115, PTH123, N806
# Start writing the file
fileID.write('wavemakerType\tPiston;\n')
@@ -581,18 +581,18 @@ def GenwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
# Create the wavemaker movement file
# Get the frequency of the wavemaker
- filewm = open(dispfilepath)
- Lines = filewm.readlines()
+ filewm = open(dispfilepath) # noqa: SIM115, PTH123
+ Lines = filewm.readlines() # noqa: N806
count = 0
for line in Lines:
- count += 1
+ count += 1 # noqa: SIM113
if count == 1:
frequency = float(line)
break
# Count the number of lines
countlines = 0
- with open(dispfilepath) as fdisp:
+ with open(dispfilepath) as fdisp: # noqa: PTH123
for line2 in fdisp:
if line2.strip():
countlines += 1
@@ -601,7 +601,7 @@ def GenwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
# Create the timeseries
time = 0
fileID.write('timeSeries\t' + str(countlines) + '(\n')
- for ii in range(countlines):
+ for ii in range(countlines): # noqa: B007
fileID.write(str(time) + '\n')
time = time + frequency
fileID.write(');\n\n')
@@ -620,8 +620,8 @@ def GenwavemakerText(self, fipath, dispfilepath, heightfilepath, numMovWall):
# Get the water depth and paddle eta
if heightfilepath != 'None':
# Get the height
- filewmg = open(heightfilepath)
- Lines2 = filewmg.readlines()
+ filewmg = open(heightfilepath) # noqa: SIM115, PTH123
+ Lines2 = filewmg.readlines() # noqa: N806
count = 0
for line in Lines2:
count += 1
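Many of the tags in of7Uboundary.py are PLR2004, Ruff's magic-value rule: the velocity-type codes 101 through 301 are compared inline at every branch. The usual suppression-free fix is to name the codes once; a sketch with the values taken from the patch (the enum and member names are illustrative, not SimCenter's):

    from enum import IntEnum

    class VelocityBC(IntEnum):
        # Codes as compared in Utext/Upatchtext/Uchecks above.
        SW_SOLUTIONS = 101
        INLET_CONSTANT = 102
        MOVING_WALL_OSU = 103
        MOVING_WALL_GEN = 104
        OUTLET_ZERO_GRADIENT = 201
        OUTLET_INLET_OUTLET = 202
        WALL_NO_SLIP = 301

    # Comparisons then read as intent and PLR2004 no longer fires:
    utype = int('103')
    if utype in (VelocityBC.MOVING_WALL_OSU, VelocityBC.MOVING_WALL_GEN):
        num_mov_wall = 1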
diff --git a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
index 72455b971..ef3d4e423 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/openfoam7.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -63,7 +63,7 @@ class openfoam7:
-------
extract:
- """
+ """ # noqa: D404
#############################################################
def createfolder(self, data, path, args):
@@ -74,7 +74,7 @@ def createfolder(self, data, path, args):
data: all the JSON data
path: Path where the new folder needs to be created
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -83,45 +83,45 @@ def createfolder(self, data, path, args):
access_rights = 0o700
# Create 0-directory
- pathF = os.path.join(path, '0.org')
- if os.path.exists(pathF):
+ pathF = os.path.join(path, '0.org') # noqa: PTH118, N806
+ if os.path.exists(pathF): # noqa: PTH110
shutil.rmtree(pathF)
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
else:
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
# Create constant-directory
- pathF = os.path.join(path, 'constant')
- if os.path.exists(pathF):
+ pathF = os.path.join(path, 'constant') # noqa: PTH118, N806
+ if os.path.exists(pathF): # noqa: PTH110
shutil.rmtree(pathF)
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
else:
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
# Create the triSurface directory
- pathF = os.path.join(path, 'constant', 'triSurface')
- if os.path.exists(pathF):
+ pathF = os.path.join(path, 'constant', 'triSurface') # noqa: PTH118, N806
+ if os.path.exists(pathF): # noqa: PTH110
shutil.rmtree(pathF)
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
else:
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
# Create system-directory
- pathF = os.path.join(path, 'system')
- if os.path.exists(pathF):
+ pathF = os.path.join(path, 'system') # noqa: PTH118, N806
+ if os.path.exists(pathF): # noqa: PTH110
shutil.rmtree(pathF)
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
else:
- os.mkdir(pathF, access_rights)
+ os.mkdir(pathF, access_rights) # noqa: PTH102
# Get the information from json file
hydrobrain = ', '.join(
hydroutil.extract_element_from_json(data, ['remoteAppDir'])
)
- mesher = ', '.join(
+ mesher = ', '.join( # noqa: F841
hydroutil.extract_element_from_json(data, ['Events', 'MeshType'])
)
- simtype = ', '.join(
+ simtype = ', '.join( # noqa: F841
hydroutil.extract_element_from_json(data, ['Events', 'SimulationType'])
)
@@ -137,7 +137,7 @@ def createfolder(self, data, path, args):
caseruntext = (
caseruntext
+ 'export HYDROBRAIN='
- + os.path.join(
+ + os.path.join( # noqa: PTH118
hydrobrain, 'applications', 'createEVENT', 'GeoClawOpenFOAM'
)
+ '\n\n'
@@ -152,19 +152,19 @@ def createfolder(self, data, path, args):
caseruntext = caseruntext + 'module load python3\n\n'
# Move the case files to the present folder
- zerofldr = os.path.join(path, '0.org')
- zero2fldr = '0'
- cstfldr = os.path.join(path, 'constant')
- systfldr = os.path.join(path, 'system')
+ zerofldr = os.path.join(path, '0.org') # noqa: PTH118
+ zero2fldr = '0' # noqa: F841
+ cstfldr = os.path.join(path, 'constant') # noqa: PTH118
+ systfldr = os.path.join(path, 'system') # noqa: PTH118
caseruntext = caseruntext + 'cp -r ' + zerofldr + ' .\n'
caseruntext = caseruntext + 'cp -r 0.org 0\n'
caseruntext = caseruntext + 'cp -r ' + cstfldr + ' .\n'
caseruntext = caseruntext + 'cp -r ' + systfldr + ' .\n\n'
# Create the caserun file
- if os.path.exists('caserun.sh'):
- os.remove('caserun.sh')
- scriptfile = open('caserun.sh', 'w')
+ if os.path.exists('caserun.sh'): # noqa: PTH110
+ os.remove('caserun.sh') # noqa: PTH107
+ scriptfile = open('caserun.sh', 'w') # noqa: SIM115, PTH123
scriptfile.write(caseruntext)
scriptfile.close()
@@ -180,7 +180,7 @@ def creategeometry(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -190,27 +190,27 @@ def creategeometry(self, data, path):
)
# Create the geometry related files
- Geometry = of7Geometry()
+ Geometry = of7Geometry() # noqa: N806
if int(mesher[0]) == 1:
return 0
- elif int(mesher[0]) == 0 or int(mesher[0]) == 2:
+ elif int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: RET505, PLR2004
geomcode = Geometry.geomcheck(data, path)
if geomcode == -1:
return -1
- else:
+ else: # noqa: RET505
stlcode = Geometry.createOFSTL(data, path)
if stlcode < 0:
return -1
# Building related files
- Building = of7Building()
+ Building = of7Building() # noqa: N806
if int(mesher[0]) == 1:
return 0
- elif int(mesher[0]) == 0 or int(mesher[0]) == 2:
+ elif int(mesher[0]) == 0 or int(mesher[0]) == 2: # noqa: RET505, PLR2004
buildcode = Building.buildcheck(data, path)
if buildcode == -1:
return -1
- else:
+ else: # noqa: RET505
buildcode2 = Building.createbuilds(data, path)
if buildcode2 < 0:
return -1
@@ -232,7 +232,7 @@ def createmesh(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -242,30 +242,30 @@ def createmesh(self, data, path):
)
# Create the meshing related file
- Meshing = of7Meshing()
+ Meshing = of7Meshing() # noqa: N806
meshcode = Meshing.meshcheck(data, path)
if meshcode == -1:
return -1
- elif int(mesher[0]) == 0:
+ elif int(mesher[0]) == 0: # noqa: RET505
# blockMesh
bmeshtext = Meshing.bmeshtext(data)
fname = 'blockMeshDict'
- filepath = os.path.join(path, 'system', fname)
- bmeshfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ bmeshfile = open(filepath, 'w') # noqa: SIM115, PTH123
bmeshfile.write(bmeshtext)
bmeshfile.close()
# surfaceFeatureExtract
sfetext = Meshing.sfetext()
fname = 'surfaceFeatureExtractDict'
- filepath = os.path.join(path, 'system', fname)
- sfefile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ sfefile = open(filepath, 'w') # noqa: SIM115, PTH123
sfefile.write(sfetext)
sfefile.close()
# snappyHexMesh
shmtext = Meshing.shmtext(data)
fname = 'snappyHexMeshDict'
- filepath = os.path.join(path, 'system', fname)
- shmfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ shmfile = open(filepath, 'w') # noqa: SIM115, PTH123
shmfile.write(shmtext)
shmfile.close()
@@ -290,17 +290,17 @@ def materials(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the transportProperties file
- Materials = of7Materials()
+ Materials = of7Materials() # noqa: N806
matcode = Materials.matcheck(data)
if matcode == -1:
return -1
- else:
+ else: # noqa: RET505
mattext = Materials.mattext(data)
fname = 'transportProperties'
- filepath = os.path.join(path, 'constant', fname)
- matfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'constant', fname) # noqa: PTH118
+ matfile = open(filepath, 'w') # noqa: SIM115, PTH123
matfile.write(mattext)
matfile.close()
@@ -315,17 +315,17 @@ def initial(self, data, path):
data: all the JSON data
path: Path where the dakota.json file lies
- """
+ """ # noqa: D400, D401
# Create the setFields file
- Inicond = of7Initial()
+ Inicond = of7Initial() # noqa: N806
initcode = Inicond.alphacheck(data, path)
if initcode == -1:
return -1
- else:
+ else: # noqa: RET505
alphatext = Inicond.alphatext(data, path)
fname = 'setFieldsDict'
- filepath = os.path.join(path, 'system', fname)
- alphafile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ alphafile = open(filepath, 'w') # noqa: SIM115, PTH123
alphafile.write(alphatext)
alphafile.close()
@@ -343,60 +343,60 @@ def boundary(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Initialize the patches
patches = ['Entry', 'Exit', 'Top', 'Bottom', 'Right', 'Left']
# Create object for velocity boundary condition
# Get the text for the velocity boundary
# Write the U-file in 0.org
- Uboundary = of7Uboundary()
+ Uboundary = of7Uboundary() # noqa: N806
utext = Uboundary.Utext(data, path, patches)
# Check for boundary conditions here
ecode = Uboundary.Uchecks(data, path, patches)
if ecode == -1:
return -1
- else:
+ else: # noqa: RET505
# Write the U-file if no errors
# Path to the file
fname = 'U'
- filepath = os.path.join(path, '0.org', fname)
- Ufile = open(filepath, 'w')
+ filepath = os.path.join(path, '0.org', fname) # noqa: PTH118
+ Ufile = open(filepath, 'w') # noqa: SIM115, PTH123, N806
Ufile.write(utext)
Ufile.close()
# Create object for pressure boundary condition
# Get the text for the pressure boundary
# Write the p_rgh-file in 0.org
- Prboundary = of7Prboundary()
+ Prboundary = of7Prboundary() # noqa: N806
prtext = Prboundary.Prtext(data, patches)
fname = 'p_rgh'
- filepath = os.path.join(path, '0.org', fname)
- pr_file = open(filepath, 'w')
+ filepath = os.path.join(path, '0.org', fname) # noqa: PTH118
+ pr_file = open(filepath, 'w') # noqa: SIM115, PTH123
pr_file.write(prtext)
pr_file.close()
# Create object for alpha boundary condition
# Get the text for the alpha boundary
# Write the alpha-file in 0.org
- Alpboundary = of7Alpboundary()
- Alptext = Alpboundary.Alptext(data, patches)
+ Alpboundary = of7Alpboundary() # noqa: N806
+ Alptext = Alpboundary.Alptext(data, patches) # noqa: N806
fname = 'alpha.water'
- filepath = os.path.join(path, '0.org', fname)
- Alpfile = open(filepath, 'w')
+ filepath = os.path.join(path, '0.org', fname) # noqa: PTH118
+ Alpfile = open(filepath, 'w') # noqa: SIM115, PTH123, N806
Alpfile.write(Alptext)
Alpfile.close()
# Loop over all the velocity types to see if any
# has a moving wall. If so initialize the
# pointDisplacement file
- PtDboundary = of7PtDboundary()
- ptDcode = PtDboundary.PtDcheck(data, patches)
+ PtDboundary = of7PtDboundary() # noqa: N806
+ ptDcode = PtDboundary.PtDcheck(data, patches) # noqa: N806
if ptDcode == 1:
pdtext = PtDboundary.PtDtext(data, path, patches)
fname = 'pointDisplacement'
- filepath = os.path.join(path, '0.org', fname)
- ptDfile = open(filepath, 'w')
+ filepath = os.path.join(path, '0.org', fname) # noqa: PTH118
+ ptDfile = open(filepath, 'w') # noqa: SIM115, PTH123, N806
ptDfile.write(pdtext)
ptDfile.close()
@@ -411,13 +411,13 @@ def turbulence(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the domain decomposition file
- Turb = of7Turbulence()
+ Turb = of7Turbulence() # noqa: N806
turbtext = Turb.turbtext(data)
fname = 'turbulenceProperties'
- filepath = os.path.join(path, 'constant', fname)
- turbfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'constant', fname) # noqa: PTH118
+ turbfile = open(filepath, 'w') # noqa: SIM115, PTH123
turbfile.write(turbtext)
turbfile.close()
@@ -432,13 +432,13 @@ def parallelize(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the domain decomposition file
- Decomp = of7Decomp()
+ Decomp = of7Decomp() # noqa: N806
decomptext = Decomp.decomptext(data)
fname = 'decomposeParDict'
- filepath = os.path.join(path, 'system', fname)
- decompfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ decompfile = open(filepath, 'w') # noqa: SIM115, PTH123
decompfile.write(decomptext)
decompfile.close()
@@ -456,22 +456,22 @@ def solve(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the solver files
- Solve = of7Solve()
+ Solve = of7Solve() # noqa: N806
# fvSchemes
fvschemetext = Solve.fvSchemetext(data)
fname = 'fvSchemes'
- filepath = os.path.join(path, 'system', fname)
- fvschemefile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ fvschemefile = open(filepath, 'w') # noqa: SIM115, PTH123
fvschemefile.write(fvschemetext)
fvschemefile.close()
# fvSolutions
fvsolntext = Solve.fvSolntext(data)
fname = 'fvSolution'
- filepath = os.path.join(path, 'system', fname)
- fvsolnfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ fvsolnfile = open(filepath, 'w') # noqa: SIM115, PTH123
fvsolnfile.write(fvsolntext)
fvsolnfile.close()
@@ -479,18 +479,18 @@ def solve(self, data, path):
ecode = Solve.cdictcheck(data)
if ecode == -1:
return -1
- else:
+ else: # noqa: RET505
cdicttext = Solve.cdicttext(data)
fname = 'controlDict'
- filepath = os.path.join(path, 'system', fname)
- cdictfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'system', fname) # noqa: PTH118
+ cdictfile = open(filepath, 'w') # noqa: SIM115, PTH123
cdictfile.write(cdicttext)
cdictfile.close()
# Create CdictForce
- cdictFtext = Solve.cdictFtext(data)
+ cdictFtext = Solve.cdictFtext(data) # noqa: N806
fname = 'cdictforce'
- cdictFfile = open(fname, 'w')
+ cdictFfile = open(fname, 'w') # noqa: SIM115, PTH123, N806
cdictFfile.write(cdictFtext)
cdictFfile.close()
@@ -505,14 +505,14 @@ def others(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the auxiliary files
- Others = of7Others()
+ Others = of7Others() # noqa: N806
# g-file
gfiletext = Others.gfiletext(data)
fname = 'g'
- filepath = os.path.join(path, 'constant', fname)
- gfile = open(filepath, 'w')
+ filepath = os.path.join(path, 'constant', fname) # noqa: PTH118
+ gfile = open(filepath, 'w') # noqa: SIM115, PTH123
gfile.write(gfiletext)
gfile.close()
@@ -526,7 +526,7 @@ def dakota(self, args):
---------
args: all arguments
- """
+ """ # noqa: D400, D401
# Create the solver files
dakota = of7Dakota()
@@ -544,28 +544,28 @@ def postprocessing(self, data, path):
data: all the JSON data
path: Path where the geometry files (STL) need to be created
- """
+ """ # noqa: D400, D401
# Create the solver files
pprocess = of7Process()
# controlDict
ecode = pprocess.pprocesscheck(data, path)
if ecode == -1:
return -1
- elif ecode == 0:
+ elif ecode == 0: # noqa: RET505
return 0
else:
# sample file
pprocesstext = pprocess.pprocesstext(data, path)
fname = 'sample'
- filepath = os.path.join(fname)
- samplefile = open(filepath, 'w')
+ filepath = os.path.join(fname) # noqa: PTH118
+ samplefile = open(filepath, 'w') # noqa: SIM115, PTH123
samplefile.write(pprocesstext)
samplefile.close()
# Controldict
pprocesstext = pprocess.pprocesscdict(data, path)
fname = 'cdictpp'
- filepath = os.path.join(fname)
- samplefile = open(filepath, 'w')
+ filepath = os.path.join(fname) # noqa: PTH118
+ samplefile = open(filepath, 'w') # noqa: SIM115, PTH123
samplefile.write(pprocesstext)
samplefile.close()
@@ -582,7 +582,7 @@ def cleaning(self, args, path):
---------
args: all arguments
- """
+ """ # noqa: D400, D401
# Create the solver files
cleaner = of7Dakota()
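createfolder above repeats the same exists/rmtree/mkdir block for 0.org, constant, constant/triSurface, and system, suppressing PTH110 and PTH102 each time. A pathlib helper removes both the duplication and the tags; a sketch assuming the wipe-and-recreate semantics are intended:

    import shutil
    from pathlib import Path

    def recreate_dir(base, *parts, mode=0o700):
        # Same behavior as each block in createfolder: remove the
        # directory tree if it exists, then create it fresh.
        target = Path(base).joinpath(*parts)
        if target.exists():
            shutil.rmtree(target)
        target.mkdir(mode=mode)
        return target

Called in the patch's order (recreate_dir(path, '0.org'), then 'constant', then 'constant', 'triSurface', then 'system'), every parent directory already exists, which matches os.mkdir's non-recursive behavior.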
diff --git a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
index 25097a0f1..7c3d584d8 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/osuFlume.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -50,17 +50,17 @@ class osuFlume:
-------
creategeom: Create geometry and STL files
- """
+ """ # noqa: D205, D400, D404
#############################################################
- def creategeom(self, data, path):
+ def creategeom(self, data, path): # noqa: ARG002
"""Creates the geometry for OSU flume
Arguments:
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Number of flume points
numflumepoints = 9
@@ -87,9 +87,9 @@ def creategeom(self, data, path):
# Create temporary file
filename = 'FlumeData.txt'
- if os.path.exists(filename):
- os.remove(filename)
- f = open(filename, 'a')
+ if os.path.exists(filename): # noqa: PTH110
+ os.remove(filename) # noqa: PTH107
+ f = open(filename, 'a') # noqa: SIM115, PTH123
for ii in range(int(numflumepoints)):
f.write(str(nums[2 * ii]) + ',' + str(nums[2 * ii + 1]) + '\n')
f.close()
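Both flume writers suppress PTH110/PTH107 for the exists()/remove() pair and SIM115/PTH123 for the bare open/close. In pathlib the first pair collapses to one call and a with-block covers the second; a sketch (missing_ok requires Python 3.8+):

    from pathlib import Path

    flume_file = Path('FlumeData.txt')
    # unlink(missing_ok=True) replaces the exists()/remove() pair.
    flume_file.unlink(missing_ok=True)
    with flume_file.open('a') as f:
        # closed automatically, even if a write raises
        f.write('0.0,0.0\n')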
diff --git a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
index 723401a97..60ef1373b 100644
--- a/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
+++ b/modules/createEVENT/GeoClawOpenFOAM/userFlume.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001
# LICENSING INFORMATION
####################################################################
"""LICENSE INFORMATION:
@@ -21,7 +21,7 @@
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-"""
+""" # noqa: D400
####################################################################
# AUTHOR INFORMATION
####################################################################
@@ -50,7 +50,7 @@ class userFlume:
-------
creategeom: Create geometry and STL files
- """
+ """ # noqa: D205, D400, D404
#############################################################
def creategeom(self, data, path):
@@ -60,7 +60,7 @@ def creategeom(self, data, path):
---------
data: all the JSON data
- """
+ """ # noqa: D400, D401
# Create a utilities object
hydroutil = hydroUtils()
@@ -82,9 +82,9 @@ def creategeom(self, data, path):
# Create temporary file
filename = 'FlumeData.txt'
- if os.path.exists(filename):
- os.remove(filename)
- f = open(filename, 'a')
+ if os.path.exists(filename): # noqa: PTH110
+ os.remove(filename) # noqa: PTH107
+ f = open(filename, 'a') # noqa: SIM115, PTH123
for ii in range(int(numflumesegs)):
f.write(str(nums[2 * ii]) + ',' + str(nums[2 * ii + 1]) + '\n')
f.close()
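RET505, suppressed throughout openfoam7.py above, flags an else after a branch that returns: once the error path has returned, the remaining code can simply dedent. A sketch of the pattern as it appears in materials/initial/boundary (the signature is illustrative):

    def materials(matcheck_code, write_transport_properties):
        # The early return makes the else (RET505) unnecessary, so
        # the happy path reads straight down without extra nesting.
        if matcheck_code == -1:
            return -1
        write_transport_properties()
        return 0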
diff --git a/modules/createEVENT/HighRiseTPU/HighRiseTPU.py b/modules/createEVENT/HighRiseTPU/HighRiseTPU.py
index 1594eb1c4..438520fe5 100644
--- a/modules/createEVENT/HighRiseTPU/HighRiseTPU.py
+++ b/modules/createEVENT/HighRiseTPU/HighRiseTPU.py
@@ -1,4 +1,4 @@
-# python code to open the TPU .mat file
+# python code to open the TPU .mat file # noqa: INP001, D100
# and put data into a SimCenter JSON file for
# wind tunnel data
@@ -7,21 +7,21 @@
import scipy.io as sio
-inputArgs = sys.argv
+inputArgs = sys.argv # noqa: N816
-print('Number of arguments: %d' % len(sys.argv))
-print('The arguments are: %s' % str(sys.argv))
+print('Number of arguments: %d' % len(sys.argv)) # noqa: T201
+print('The arguments are: %s' % str(sys.argv)) # noqa: T201, UP031
# set filenames
-matFileIN = sys.argv[1]
-jsonFileOUT = sys.argv[2]
+matFileIN = sys.argv[1] # noqa: N816
+jsonFileOUT = sys.argv[2] # noqa: N816
-dataDir = os.getcwd()
-scriptDir = os.path.dirname(os.path.realpath(__file__))
+dataDir = os.getcwd() # noqa: PTH109, N816
+scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
-def parseTPU_HighRise_MatFile(matFileIn, windFileOutName):
- file = open(windFileOutName, 'w', encoding='utf-8')
+def parseTPU_HighRise_MatFile(matFileIn, windFileOutName): # noqa: N802, N803, D103
+ file = open(windFileOutName, 'w', encoding='utf-8') # noqa: SIM115, PTH123
file.write('{\n')
mat_contents = sio.loadmat(matFileIn)
depth = mat_contents['Building_depth'][0][0]
@@ -31,42 +31,42 @@ def parseTPU_HighRise_MatFile(matFileIn, windFileOutName):
frequency = mat_contents['Sample_frequency'][0][0]
angle = mat_contents['Wind_direction_angle'][0][0]
# uH = mat_contents['Uh_AverageWindSpeed'][0][0];
- uH = float(mat_contents['Uh_AverageWindSpeed'][0])
- print(uH)
- print(depth)
- print(height)
- file.write('"windSpeed":%f,' % uH)
- file.write('"depth":%f,' % depth)
- file.write('"height":%f,' % height)
- file.write('"breadth":%f,' % breadth)
- file.write('"period":%f,' % period)
+ uH = float(mat_contents['Uh_AverageWindSpeed'][0]) # noqa: N806
+ print(uH) # noqa: T201
+ print(depth) # noqa: T201
+ print(height) # noqa: T201
+ file.write('"windSpeed":%f,' % uH) # noqa: UP031
+ file.write('"depth":%f,' % depth) # noqa: UP031
+ file.write('"height":%f,' % height) # noqa: UP031
+ file.write('"breadth":%f,' % breadth) # noqa: UP031
+ file.write('"period":%f,' % period) # noqa: UP031
file.write('"units":{"length":"m","time":"sec"},')
- file.write('"frequency":%f,' % frequency)
- file.write('"incidenceAngle":%f,' % angle)
+ file.write('"frequency":%f,' % frequency) # noqa: UP031
+ file.write('"incidenceAngle":%f,' % angle) # noqa: UP031
file.write('"tapLocations": [')
locations = mat_contents['Location_of_measured_points']
- numLocations = locations.shape[1]
+ numLocations = locations.shape[1] # noqa: N806
# get xMax and yMax .. assuming first sensor is 1m from building edge
# location on faces cannot be obtained from the inputs, at least not with
# current documentation, awaiting email from TPU
- xMax = max(locations[0]) + 1
- yMax = max(locations[1]) + 1
+ xMax = max(locations[0]) + 1 # noqa: N806, F841
+ yMax = max(locations[1]) + 1 # noqa: N806, F841
for loc in range(numLocations):
tag = locations[2][loc]
- xLoc = locations[0][loc]
- yLoc = locations[1][loc]
+ xLoc = locations[0][loc] # noqa: N806
+ yLoc = locations[1][loc] # noqa: N806
face = locations[3][loc]
- X = xLoc
- Y = yLoc
- if face == 2:
- xLoc = X - breadth
- elif face == 3:
- xLoc = X - breadth - depth
- elif face == 4:
- xLoc = X - 2 * breadth - depth
+ X = xLoc # noqa: N806
+ Y = yLoc # noqa: N806, F841
+ if face == 2: # noqa: PLR2004
+ xLoc = X - breadth # noqa: N806
+ elif face == 3: # noqa: PLR2004
+ xLoc = X - breadth - depth # noqa: N806
+ elif face == 4: # noqa: PLR2004
+ xLoc = X - 2 * breadth - depth # noqa: N806
if loc == numLocations - 1:
file.write(
@@ -79,16 +79,16 @@ def parseTPU_HighRise_MatFile(matFileIn, windFileOutName):
file.write(',"pressureCoefficients": [')
coefficients = mat_contents['Wind_pressure_coefficients']
- numLocations = coefficients.shape[1]
- numValues = coefficients.shape[0]
+ numLocations = coefficients.shape[1] # noqa: N806
+ numValues = coefficients.shape[0] # noqa: N806
for loc in range(numLocations):
file.write('{"id": %d , "data":[' % (loc + 1))
for i in range(numValues - 1):
- file.write('%f,' % coefficients[i, loc])
+ file.write('%f,' % coefficients[i, loc]) # noqa: UP031
if loc != numLocations - 1:
- file.write('%f]},' % coefficients[numValues - 1, loc])
+ file.write('%f]},' % coefficients[numValues - 1, loc]) # noqa: UP031
else:
- file.write('%f]}]' % coefficients[numValues - 1, loc])
+ file.write('%f]}]' % coefficients[numValues - 1, loc]) # noqa: UP031
file.write('}')
file.close()
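The UP031 tags in HighRiseTPU.py mark percent-style formatting; Ruff's preferred form is an f-string, and since the target here is JSON, serializing a dict avoids hand-written syntax altogether. A sketch using field names from the patch (values are placeholders):

    import json

    u_h, depth_m, height_m = 12.5, 20.0, 60.0  # placeholder values

    # f-string form of: file.write('"windSpeed":%f,' % uH)
    fragment = f'"windSpeed":{u_h:f},'

    # Dict-plus-dumps form, which cannot emit malformed JSON:
    record = {'windSpeed': u_h, 'depth': depth_m, 'height': height_m,
              'units': {'length': 'm', 'time': 'sec'}}
    text = json.dumps(record)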
diff --git a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
index 62c8fed01..7962f92cd 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/IsolatedBuildingCFD.py
@@ -1,25 +1,25 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -31,10 +31,10 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- patternsArray = []
- windEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ patternsArray = [] # noqa: N806
+ windEventJson = { # noqa: N806
'type': 'Wind',
'subtype': 'IsolatedBuildingCFD',
'pattern': patternsArray,
@@ -44,20 +44,20 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [windEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [windEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
- with open(eventFilePath, 'w') as eventsFile:
+ with open(eventFilePath, 'w') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath, encoding='utf-8') as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath, encoding='utf-8') as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -78,11 +78,11 @@ def GetFloorsCount(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# write the event file
writeEVENT(forces, arguments.filenameEVENT)
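The B007/PERF401 pair at the end of this file tags an append loop whose index i is never used; a comprehension with a throwaway name expresses the same construction and drops both tags. A sketch (FloorForces stub repeated from the patch, floors_count stands in for GetFloorsCount):

    class FloorForces:
        def __init__(self):
            self.X, self.Y, self.Z = [0], [0], [0]

    floors_count = 10  # placeholder for GetFloorsCount(arguments.filenameAIM)
    # One comprehension replaces the loop and the unused index.
    forces = [FloorForces() for _ in range(floors_count)]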
diff --git a/modules/createEVENT/IsolatedBuildingCFD/foam_dict_reader.py b/modules/createEVENT/IsolatedBuildingCFD/foam_dict_reader.py
index 8e9a71b33..8d7baf058 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/foam_dict_reader.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/foam_dict_reader.py
@@ -1,22 +1,22 @@
-"""This script contains functions for reading and writing OpenFoam dictionaries."""
+"""This script contains functions for reading and writing OpenFoam dictionaries.""" # noqa: INP001, D404
import os
import numpy as np
-def find_keyword_line(dict_lines, keyword):
+def find_keyword_line(dict_lines, keyword): # noqa: D103
start_line = -1
count = 0
for line in dict_lines:
- l = line.lstrip(' ')
+ l = line.lstrip(' ') # noqa: E741
if l.startswith(keyword):
start_line = count
break
- count += 1
+ count += 1 # noqa: SIM113
return start_line
@@ -28,11 +28,11 @@ def write_foam_field(field, file_name):
vectorField,
tensorField,
symmTensorField
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D400, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
@@ -53,11 +53,11 @@ def write_foam_field(field, file_name):
def write_scalar_field(field, file_name):
"""Writes a given one dimensional numpy array to OpenFOAM
scalar field format.
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
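find_keyword_line maintains a manual counter, tagged SIM113; enumerate() supplies the index directly and an early return drops the sentinel bookkeeping. A sketch preserving the first-match, -1-if-absent behavior of the patch:

    def find_keyword_line(dict_lines, keyword):
        # enumerate() replaces the hand-kept count (SIM113).
        for index, line in enumerate(dict_lines):
            if line.lstrip(' ').startswith(keyword):
                return index
        return -1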
diff --git a/modules/createEVENT/IsolatedBuildingCFD/process_output_data.py b/modules/createEVENT/IsolatedBuildingCFD/process_output_data.py
index b977b9d9b..4203748b5 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/process_output_data.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/process_output_data.py
@@ -3,16 +3,16 @@
code creates pressure probes for the main simulation. Three types of
probes are created.
-"""
+""" # noqa: INP001, D404
import json
import sys
-import CWE as cwe
+import CWE as cwe # noqa: N811
import numpy as np
-def write_wind_profiles(case_path):
+def write_wind_profiles(case_path): # noqa: D103
inf_path = (
case_path + '/constant/boundaryData/windProfile/sampledData/verticalProfile/'
)
@@ -20,7 +20,7 @@ def write_wind_profiles(case_path):
inf = cwe.VelocityData('cfd', inf_path, start_time=None, end_time=None)
# Read JSON data for turbulence model
- wc_json_file = open(case_path + '/constant/simCenter/windCharacteristics.json')
+ wc_json_file = open(case_path + '/constant/simCenter/windCharacteristics.json') # noqa: SIM115, PTH123
# Returns JSON object as a dictionary
wind_data = json.load(wc_json_file)
@@ -36,10 +36,10 @@ def write_wind_profiles(case_path):
prof[:, 3] = inf.L[:, 0]
# Wind velocity at roof height
- H_loc = np.argmin(np.abs(inf.z - building_height))
+ H_loc = np.argmin(np.abs(inf.z - building_height)) # noqa: N806
# U, v, w at roof height
- Uh = inf.U[H_loc, :, :].T
+ Uh = inf.U[H_loc, :, :].T # noqa: N806
s_uh = []
@@ -49,7 +49,7 @@ def write_wind_profiles(case_path):
s_uh.insert(0, f)
- Suhout = np.asarray(s_uh, dtype=np.float32).T
+ Suhout = np.asarray(s_uh, dtype=np.float32).T # noqa: N806
write_precision = 6
fmt = f'%.{write_precision}e'
@@ -61,7 +61,7 @@ def write_wind_profiles(case_path):
np.savetxt(s_uh_path, Suhout, fmt=fmt)
-def write_wind_loads(case_path):
+def write_wind_loads(case_path): # noqa: D103
# Write base forces
base_forces_path = case_path + '/postProcessing/baseForces/0/forces.dat'
base_o, base_t, base_f, base_m = cwe.read_forces_OF10(base_forces_path)
@@ -82,9 +82,9 @@ def write_wind_loads(case_path):
out_base_path = case_path + '/constant/simCenter/output/baseForces.txt'
- out_story_path_Fx = case_path + '/constant/simCenter/output/storyForcesFx.txt'
- out_story_path_Fy = case_path + '/constant/simCenter/output/storyForcesFy.txt'
- out_story_path_Mz = case_path + '/constant/simCenter/output/storyForcesMz.txt'
+ out_story_path_Fx = case_path + '/constant/simCenter/output/storyForcesFx.txt' # noqa: N806
+ out_story_path_Fy = case_path + '/constant/simCenter/output/storyForcesFy.txt' # noqa: N806
+ out_story_path_Mz = case_path + '/constant/simCenter/output/storyForcesMz.txt' # noqa: N806
np.savetxt(out_base_path, base_forces, fmt=fmt)
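The windCharacteristics.json read above is tagged SIM115 for the bare open(); note also that json.load() accepts only the open file object, while mode and encoding belong to open(). A context-manager sketch of the same read (case_path stands in for the function argument):

    import json
    from pathlib import Path

    case_path = '.'  # placeholder for the case directory argument
    wc_path = Path(case_path) / 'constant/simCenter/windCharacteristics.json'
    # The handle is closed on exit from the with-block, and json.load()
    # receives just the file object.
    with wc_path.open(encoding='utf-8') as wc_json_file:
        wind_data = json.load(wc_json_file)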
diff --git a/modules/createEVENT/IsolatedBuildingCFD/setup_case.py b/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
index 84460df6f..b45bb39aa 100644
--- a/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
+++ b/modules/createEVENT/IsolatedBuildingCFD/setup_case.py
@@ -1,7 +1,7 @@
"""This script writes BC and initial condition, and setups the OpenFoam case
directory.
-"""
+""" # noqa: INP001, D205, D404
import json
import os
@@ -12,9 +12,9 @@
from stl import mesh
-def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_block_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -27,12 +27,12 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
normalization_type = geom_data['normalizationType']
origin = np.array(geom_data['origin'])
scale = geom_data['geometricScale']
- H = geom_data['buildingHeight'] / scale # convert to model-scale
+ H = geom_data['buildingHeight'] / scale # convert to model-scale # noqa: N806
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_cells = mesh_data['xNumCells']
y_cells = mesh_data['yNumCells']
@@ -66,10 +66,10 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
length_unit = json_data['lengthUnit']
if normalization_type == 'Relative':
- Lx = Lx * H
- Ly = Ly * H
- Lz = Lz * H
- Lf = Lf * H
+ Lx = Lx * H # noqa: N806
+ Ly = Ly * H # noqa: N806
+ Lz = Lz * H # noqa: N806
+ Lf = Lf * H # noqa: N806
origin = origin * H
x_min = -Lf - origin[0]
@@ -81,7 +81,7 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
z_max = z_min + Lz
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/blockMeshDictTemplate')
+ dict_file = open(template_dict_path + '/blockMeshDictTemplate') # noqa: SIM115, PTH123
# Read the template into a list of lines
dict_lines = dict_file.readlines()
@@ -126,19 +126,19 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
write_file_name = case_path + '/system/blockMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_building_stl_file(input_json_path, case_path):
+def write_building_stl_file(input_json_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -167,9 +167,9 @@ def write_building_stl_file(input_json_path, case_path):
convert_to_meters = 0.0254
# Convert from full-scale to model-scale
- B = convert_to_meters * geom_data['buildingWidth'] / scale
- D = convert_to_meters * geom_data['buildingDepth'] / scale
- H = convert_to_meters * geom_data['buildingHeight'] / scale
+ B = convert_to_meters * geom_data['buildingWidth'] / scale # noqa: N806
+ D = convert_to_meters * geom_data['buildingDepth'] / scale # noqa: N806
+ H = convert_to_meters * geom_data['buildingHeight'] / scale # noqa: N806
normalization_type = geom_data['normalizationType']
@@ -245,9 +245,9 @@ def write_building_stl_file(input_json_path, case_path):
bldg.save(case_path + '/constant/geometry/building.stl', mode=fmt)
-def import_building_stl_file(input_json_path, case_path):
+def import_building_stl_file(input_json_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -259,7 +259,7 @@ def import_building_stl_file(input_json_path, case_path):
stl_path = json_data['GeometricData']['importedSTLPath']
scale_factor = json_data['GeometricData']['stlScaleFactor']
recenter = json_data['GeometricData']['recenterToOrigin']
- use_stl_dimension = json_data['GeometricData']['useSTLDimensions']
+ use_stl_dimension = json_data['GeometricData']['useSTLDimensions'] # noqa: F841
account_wind_direction = json_data['GeometricData']['accountWindDirection']
origin = np.array(json_data['GeometricData']['origin'])
wind_dxn = json_data['GeometricData']['windDirection']
@@ -290,7 +290,7 @@ def import_building_stl_file(input_json_path, case_path):
json_object = json.dumps(stl_summary, indent=4)
# Writing the STL geometry summary to JSON
- with open(
+ with open( # noqa: PTH123
input_json_path + '/stlGeometrySummary.json', 'w', encoding='utf-8'
) as outfile:
outfile.write(json_object)
@@ -322,9 +322,9 @@ def import_building_stl_file(input_json_path, case_path):
bldg_mesh.save(case_path + '/constant/geometry/building.stl', mode=fmt)
-def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_path):
+def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -334,7 +334,7 @@ def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_pat
building_stl_name = domain_data['buildingSTLName']
# Open the template surfaceFeaturesDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate')
+ dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate') # noqa: SIM115, PTH123
# Read the template into a list of lines
dict_lines = dict_file.readlines()
@@ -347,19 +347,19 @@ def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/system/surfaceFeaturesDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -370,12 +370,12 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
geom_data = json_data['GeometricData']
scale = geom_data['geometricScale']
- H = geom_data['buildingHeight'] / scale # convert to model-scale
+ H = geom_data['buildingHeight'] / scale # convert to model-scale # noqa: N806
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
normalization_type = geom_data['normalizationType']
origin = np.array(geom_data['origin'])
@@ -383,7 +383,7 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
building_stl_name = mesh_data['buildingSTLName']
num_cells_between_levels = mesh_data['numCellsBetweenLevels']
resolve_feature_angle = mesh_data['resolveFeatureAngle']
- num_processors = mesh_data['numProcessors']
+ num_processors = mesh_data['numProcessors'] # noqa: F841
refinement_boxes = mesh_data['refinementBoxes']
@@ -404,10 +404,10 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
prism_layer_relative_size = 'on'
if normalization_type == 'Relative':
- Lx = Lx * H
- Ly = Ly * H
- Lz = Lz * H
- Lf = Lf * H
+ Lx = Lx * H # noqa: N806
+ Ly = Ly * H # noqa: N806
+ Lz = Lz * H # noqa: N806
+ Lf = Lf * H # noqa: N806
origin = origin * H
for i in range(len(refinement_boxes)):
@@ -420,14 +420,14 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
y_min = -Ly / 2.0 - origin[1]
z_min = 0.0 - origin[2]
- x_max = x_min + Lx
+ x_max = x_min + Lx # noqa: F841
y_max = y_min + Ly
- z_max = z_min + Lz
+ z_max = z_min + Lz # noqa: F841
inside_point = [x_min + Lf / 2.0, (y_min + y_max) / 2.0, H]
# Open the template snappyHexMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate')
+ dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate') # noqa: SIM115, PTH123
# Read the template into a list of lines
dict_lines = dict_file.readlines()
@@ -570,19 +570,19 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/snappyHexMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_U_file(input_json_path, template_dict_path, case_path):
+def write_U_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -591,17 +591,17 @@ def write_U_file(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- inlet_BC_type = boundary_data['inletBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
+ inlet_BC_type = boundary_data['inletBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806, F841
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/UFileTemplate')
+ dict_file = open(template_dict_path + '/UFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -699,10 +699,10 @@ def write_U_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/U'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+', encoding='utf-8')
+ output_file = open(write_file_name, 'w+', encoding='utf-8') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
@@ -710,9 +710,9 @@ def write_U_file(input_json_path, template_dict_path, case_path):
output_file.close()
-def write_p_file(input_json_path, template_dict_path, case_path):
+def write_p_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -720,11 +720,11 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/pFileTemplate')
+ dict_file = open(template_dict_path + '/pFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -796,19 +796,19 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/p'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_nut_file(input_json_path, template_dict_path, case_path):
+def write_nut_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -817,17 +817,17 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806
# wind_speed = wind_data['roofHeightWindSpeed']
# building_height = wind_data['buildingHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/nutFileTemplate')
+ dict_file = open(template_dict_path + '/nutFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -928,18 +928,18 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/nut'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_epsilon_file(input_json_path, template_dict_path, case_path):
+def write_epsilon_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -948,17 +948,17 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/epsilonFileTemplate')
+ dict_file = open(template_dict_path + '/epsilonFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1075,10 +1075,10 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/epsilon'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
@@ -1086,9 +1086,9 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
output_file.close()
-def write_k_file(input_json_path, template_dict_path, case_path):
+def write_k_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1097,17 +1097,17 @@ def write_k_file(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/kFileTemplate')
+ dict_file = open(template_dict_path + '/kFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1115,7 +1115,7 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# BC and initial condition (you may need to scale to model scale)
# k0 = 1.3 #not in model scale
- I = 0.1
+ I = 0.1 # noqa: N806, E741
k0 = 1.5 * (I * wind_speed) ** 2
# Internal Field #########################
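
The `I = 0.1` line above needs both an N806 and an E741 waiver because a lone upper-case `I` is easily confused with `l` or `1`. A minimal sketch of the same k0 estimate with a descriptive name, which would need neither suppression; the wind speed value here is a stand-in for `wind_data['referenceWindSpeed']`:

```python
wind_speed = 10.0  # m/s; stand-in for wind_data['referenceWindSpeed']

# A descriptive name avoids both E741 (ambiguous single-letter `I`) and
# N806 (upper-case variable inside a function).
turbulence_intensity = 0.1

# Same isotropic estimate as in write_k_file: k0 = 1.5 * (I * U)**2
k0 = 1.5 * (turbulence_intensity * wind_speed) ** 2
print(f'k0 = {k0:.3f} m^2/s^2')
```
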
@@ -1216,19 +1216,19 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/k'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_controlDict_file(input_json_path, template_dict_path, case_path):
+def write_controlDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1256,7 +1256,7 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
purge_write = 3
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/controlDictTemplate')
+ dict_file = open(template_dict_path + '/controlDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1328,19 +1328,19 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/controlDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSolution_file(input_json_path, template_dict_path, case_path):
+def write_fvSolution_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1355,7 +1355,7 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
num_outer_correctors = ns_data['numOuterCorrectors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/fvSolutionTemplate')
+ dict_file = open(template_dict_path + '/fvSolutionTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1390,23 +1390,23 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSolution'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_generated_pressure_probes_file(
+def write_generated_pressure_probes_file( # noqa: D103
input_json_path,
template_dict_path,
case_path,
):
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1424,7 +1424,7 @@ def write_generated_pressure_probes_file(
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1463,23 +1463,23 @@ def write_generated_pressure_probes_file(
# Write edited dict to file
write_file_name = case_path + '/system/generatedPressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_imported_pressure_probes_file(
+def write_imported_pressure_probes_file( # noqa: D103
input_json_path,
template_dict_path,
case_path,
):
# Read JSON data
- with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1495,7 +1495,7 @@ def write_imported_pressure_probes_file(
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1538,19 +1538,19 @@ def write_imported_pressure_probes_file(
# Write edited dict to file
write_file_name = case_path + '/system/importedPressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_base_forces_file(input_json_path, template_dict_path, case_path):
+def write_base_forces_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1560,14 +1560,14 @@ def write_base_forces_file(input_json_path, template_dict_path, case_path):
# Returns JSON object as a dictionary
rm_data = json_data['resultMonitoring']
- num_stories = rm_data['numStories']
- floor_height = rm_data['floorHeight']
+ num_stories = rm_data['numStories'] # noqa: F841
+ floor_height = rm_data['floorHeight'] # noqa: F841
center_of_rotation = rm_data['centerOfRotation']
base_load_write_interval = rm_data['baseLoadWriteInterval']
- monitor_base_load = rm_data['monitorBaseLoad']
+ monitor_base_load = rm_data['monitorBaseLoad'] # noqa: F841
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/baseForcesTemplate')
+ dict_file = open(template_dict_path + '/baseForcesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1593,10 +1593,10 @@ def write_base_forces_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/baseForces'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
@@ -1604,9 +1604,9 @@ def write_base_forces_file(input_json_path, template_dict_path, case_path):
output_file.close()
-def write_story_forces_file(input_json_path, template_dict_path, case_path):
+def write_story_forces_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1617,13 +1617,13 @@ def write_story_forces_file(input_json_path, template_dict_path, case_path):
rm_data = json_data['resultMonitoring']
num_stories = rm_data['numStories']
- floor_height = rm_data['floorHeight']
+ floor_height = rm_data['floorHeight'] # noqa: F841
center_of_rotation = rm_data['centerOfRotation']
story_load_write_interval = rm_data['storyLoadWriteInterval']
- monitor_base_load = rm_data['monitorBaseLoad']
+ monitor_base_load = rm_data['monitorBaseLoad'] # noqa: F841
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/storyForcesTemplate')
+ dict_file = open(template_dict_path + '/storyForcesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1657,10 +1657,10 @@ def write_story_forces_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/storyForces'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
@@ -1668,9 +1668,9 @@ def write_story_forces_file(input_json_path, template_dict_path, case_path):
output_file.close()
-def write_momentumTransport_file(input_json_path, template_dict_path, case_path):
+def write_momentumTransport_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1679,12 +1679,12 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
turb_data = json_data['turbulenceModeling']
simulation_type = turb_data['simulationType']
- RANS_type = turb_data['RANSModelType']
- LES_type = turb_data['LESModelType']
- DES_type = turb_data['DESModelType']
+ RANS_type = turb_data['RANSModelType'] # noqa: N806
+ LES_type = turb_data['LESModelType'] # noqa: N806
+ DES_type = turb_data['DESModelType'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/momentumTransportTemplate')
+ dict_file = open(template_dict_path + '/momentumTransportTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1716,19 +1716,19 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
# Write edited dict to file
write_file_name = case_path + '/constant/momentumTransport'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_physicalProperties_file(input_json_path, template_dict_path, case_path):
+def write_physicalProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1739,7 +1739,7 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/physicalPropertiesTemplate')
+ dict_file = open(template_dict_path + '/physicalPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1751,18 +1751,18 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
# Write edited dict to file
write_file_name = case_path + '/constant/physicalProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_transportProperties_file(input_json_path, template_dict_path, case_path):
+def write_transportProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1773,7 +1773,7 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/transportPropertiesTemplate')
+ dict_file = open(template_dict_path + '/transportPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1785,19 +1785,19 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/constant/transportProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
+def write_fvSchemes_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(
+ with open( # noqa: PTH123
input_json_path + '/IsolatedBuildingCFD.json', encoding='utf-8'
) as json_file:
json_data = json.load(json_file)
@@ -1808,7 +1808,7 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
simulation_type = turb_data['simulationType']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}')
+ dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1816,19 +1816,19 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSchemes'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
+def write_decomposeParDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1837,7 +1837,7 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
num_processors = ns_data['numProcessors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/decomposeParDictTemplate')
+ dict_file = open(template_dict_path + '/decomposeParDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1857,22 +1857,22 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/decomposeParDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
+def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
- fMax = 200.0
+ fMax = 200.0 # noqa: N806
# Returns JSON object as a dictionary
wc_data = json_data['windCharacteristics']
@@ -1885,7 +1885,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
duration = duration * 1.010
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/DFSRTurbDictTemplate')
+ dict_file = open(template_dict_path + '/DFSRTurbDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1913,10 +1913,10 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/constant/DFSRTurbDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
@@ -1926,9 +1926,9 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
def write_boundary_data_files(input_json_path, case_path):
"""This functions writes wind profile files in "constant/boundaryData/inlet"
if TInf options are used for the simulation.
- """
+ """ # noqa: D205, D401, D404
# Read JSON data
- with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1949,12 +1949,12 @@ def write_boundary_data_files(input_json_path, case_path):
origin = np.array(geom_data['origin'])
- Ly = geom_data['domainWidth']
- Lf = geom_data['fetchLength']
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
if norm_type == 'Relative':
- Ly *= building_height
- Lf *= building_height
+ Ly *= building_height # noqa: N806
+ Lf *= building_height # noqa: N806
x_min = -Lf - origin[0]
y_min = -Ly / 2.0 - origin[1]
@@ -1989,7 +1989,7 @@ def write_boundary_data_files(input_json_path, case_path):
case_path = sys.argv[3]
# Read JSON data
- with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/IsolatedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
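
Every writer in this file carries the same four waivers (PTH123 and SIM115 on the opens, PTH110 and PTH107 on the delete) around an identical read-template / delete-old-file / rewrite sequence. A hedged sketch of a pathlib-based helper that would make all four unnecessary; `write_dict_file` and its `edit_lines` callback are hypothetical names, not part of the module:

```python
from pathlib import Path


def write_dict_file(template_path, output_path, edit_lines=None):
    """Read an OpenFOAM template, optionally edit its lines, write the result."""
    # Path.open() inside a context manager covers both PTH123 and SIM115.
    with Path(template_path).open() as f:
        lines = f.readlines()

    if edit_lines is not None:
        lines = edit_lines(lines)

    output = Path(output_path)
    # unlink(missing_ok=True) replaces the os.path.exists()/os.remove() pair
    # flagged as PTH110/PTH107 (missing_ok needs Python 3.8+); opening in 'w'
    # mode truncates anyway, so the delete survives only to mirror the original.
    output.unlink(missing_ok=True)
    with output.open('w') as f:
        f.writelines(lines)
```
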
diff --git a/modules/createEVENT/Istanbul/IstanbulApp.py b/modules/createEVENT/Istanbul/IstanbulApp.py
index 77deaab59..68ff5d3c6 100644
--- a/modules/createEVENT/Istanbul/IstanbulApp.py
+++ b/modules/createEVENT/Istanbul/IstanbulApp.py
@@ -1,39 +1,39 @@
-import json
+import json # noqa: INP001, D100
import os
import time
from datetime import datetime
from subprocess import PIPE, run
 # change the working directory to this script's directory
-os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
# %%
# helper function to call the tapis command
-def call(command):
+def call(command): # noqa: D103
command = command.split()
command.append('-f')
command.append('json')
- result = run(command, stdout=PIPE, stderr=PIPE, text=True, check=False)
+ result = run(command, stdout=PIPE, stderr=PIPE, text=True, check=False) # noqa: S603, UP022
result = json.loads(result.stdout)
- return result
+ return result # noqa: RET504
# %%
-def Submit_tapis_job():
- with open('TapisFiles/information.json') as file:
+def Submit_tapis_job(): # noqa: N802, D103
+ with open('TapisFiles/information.json') as file: # noqa: PTH123
information = json.load(file)
file.close()
profile = call('tapis profiles show self')
username = profile['username']
email = profile['email']
- savingDirectory = information['directory']
+ savingDirectory = information['directory'] # noqa: N806
- if not os.path.exists(savingDirectory):
- os.makedirs(savingDirectory)
+ if not os.path.exists(savingDirectory): # noqa: PTH110
+ os.makedirs(savingDirectory) # noqa: PTH103
- print('Uploading files to designsafe storage')
+ print('Uploading files to designsafe storage') # noqa: T201
call(
f'tapis files mkdir agave://designsafe.storage.default/{username}/ physics_based'
)
@@ -67,10 +67,10 @@ def Submit_tapis_job():
}
 # Generate a timestamp to append to the job name
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S') # noqa: DTZ005
jobname = f'PhysicsBasedMotion_Istanbul_{username}_{timestamp}'
- print('Submitting job')
+ print('Submitting job') # noqa: T201
jobdict['name'] = jobname
jobdict['inputs']['inputDirectory'] = (
f'agave://designsafe.storage.default/{username}/physics_based/Istanbul/'
@@ -79,7 +79,7 @@ def Submit_tapis_job():
# submit the job
jobfile = './TapisFiles/job.json'
- json.dump(jobdict, open(jobfile, 'w'), indent=2)
+ json.dump(jobdict, open(jobfile, 'w'), indent=2) # noqa: SIM115, PTH123
res = call(f'tapis jobs submit -F {jobfile}')
# delete the job file
@@ -94,29 +94,29 @@ def Submit_tapis_job():
status = call(f'tapis jobs status {jobid} ')['status']
if count == 0:
last_status = status
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
count += 1
if last_status != status:
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
last_status = status
if status == 'FAILED':
- print('Job failed')
+ print('Job failed') # noqa: T201
break
time.sleep(10)
# # %%
# # %%
- print('Downloading extracted motions')
- archivePath = call(f'tapis jobs show {jobid}')['archivePath']
- archivePath = f'agave://designsafe.storage.default/{archivePath}/Istanbul'
+ print('Downloading extracted motions') # noqa: T201
+ archivePath = call(f'tapis jobs show {jobid}')['archivePath'] # noqa: N806
+ archivePath = f'agave://designsafe.storage.default/{archivePath}/Istanbul' # noqa: N806
files = call(f'tapis files list {archivePath}/Events/')
if len(files) == 0:
- print('No files in the archive')
+ print('No files in the archive') # noqa: T201
else:
command = f'tapis files download {archivePath}/Events/ -W {savingDirectory}/'
command = command.split()
- run(command, stdout=PIPE, stderr=PIPE, text=True, check=False)
+ run(command, stdout=PIPE, stderr=PIPE, text=True, check=False) # noqa: S603, UP022
return res
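
UP022 on the `call()` helper simply asks for `capture_output=True` in place of the two `PIPE` arguments, and RET504 asks for a direct return. A sketch of the helper in that form, behavior unchanged (S603 still applies, since an external CLI is being launched):

```python
import json
import subprocess


def call(command):
    """Run a tapis CLI command and return its parsed JSON output."""
    args = [*command.split(), '-f', 'json']
    # capture_output=True is the UP022-preferred spelling of
    # stdout=PIPE, stderr=PIPE.
    result = subprocess.run(args, capture_output=True, text=True, check=False)  # noqa: S603
    # Returning the parsed value directly avoids the RET504 waiver.
    return json.loads(result.stdout)
```
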
diff --git a/modules/createEVENT/Istanbul/IstanbulApp2.py b/modules/createEVENT/Istanbul/IstanbulApp2.py
index cf686aafe..e295d9991 100644
--- a/modules/createEVENT/Istanbul/IstanbulApp2.py
+++ b/modules/createEVENT/Istanbul/IstanbulApp2.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
import json
import os
import time
@@ -7,23 +7,23 @@
from agavepy.agave import Agave
 # change the working directory to this script's directory
-os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
-def Submit_tapis_job():
+def Submit_tapis_job(): # noqa: N802, D103
ag = Agave.restore()
- with open('TapisFiles/information.json') as file:
+ with open('TapisFiles/information.json') as file: # noqa: PTH123
information = json.load(file)
file.close()
# %%
profile = ag.profiles.get()
username = profile['username']
- savingDirectory = information['directory']
- if not os.path.exists(savingDirectory):
- os.makedirs(savingDirectory)
+ savingDirectory = information['directory'] # noqa: N806
+ if not os.path.exists(savingDirectory): # noqa: PTH110
+ os.makedirs(savingDirectory) # noqa: PTH103
- print('Uploading files to designsafe storage')
+ print('Uploading files to designsafe storage') # noqa: T201
ag.files.manage(
systemId='designsafe.storage.default',
filePath=f'{username}/',
@@ -35,20 +35,20 @@ def Submit_tapis_job():
body={'action': 'mkdir', 'path': 'Istanbul'},
)
# ag.files_mkdir(systemId="designsafe.storage.default", filePath=f"{username}/physics_based/Istanbul2")
- with open('TapisFiles/Istanbul.py', 'rb') as file:
+ with open('TapisFiles/Istanbul.py', 'rb') as file: # noqa: PTH123
result = ag.files.importData(
filePath=f'{username}/physics_based/Istanbul/',
fileToUpload=file,
systemId='designsafe.storage.default',
)
- with open('TapisFiles/information.json', 'rb') as file:
+ with open('TapisFiles/information.json', 'rb') as file: # noqa: PTH123
result = ag.files.importData(
filePath=f'{username}/physics_based/Istanbul/',
fileToUpload=file,
systemId='designsafe.storage.default',
)
- with open('TapisFiles/selectedSites.csv', 'rb') as file:
- result = ag.files.importData(
+ with open('TapisFiles/selectedSites.csv', 'rb') as file: # noqa: PTH123
+ result = ag.files.importData( # noqa: F841
filePath=f'{username}/physics_based/Istanbul/',
fileToUpload=file,
systemId='designsafe.storage.default',
@@ -70,10 +70,10 @@ def Submit_tapis_job():
}
 # Generate a timestamp to append to the job name
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S') # noqa: DTZ005
jobname = f'PhysicsBasedMotion_Istanbul_{username}_{timestamp}'
- print('Submitting job')
+ print('Submitting job') # noqa: T201
# submit the job
jobdict['name'] = jobname
jobdict['inputs']['inputDirectory'] = (
@@ -90,28 +90,28 @@ def Submit_tapis_job():
status = ag.jobs.getStatus(jobId=jobid)['status']
if count == 0:
last_status = status
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
count += 1
if last_status != status:
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
last_status = status
if status == 'FAILED':
- print('Job failed')
+ print('Job failed') # noqa: T201
break
time.sleep(10)
# %%
- print('Downloading extracted motions')
- archivePath = ag.jobs.get(jobId=jobid)['archivePath']
- archivePath = f'{archivePath}/Istanbul/Events/'
+ print('Downloading extracted motions') # noqa: T201
+ archivePath = ag.jobs.get(jobId=jobid)['archivePath'] # noqa: N806
+ archivePath = f'{archivePath}/Istanbul/Events/' # noqa: N806
files = ag.files.list(
filePath=archivePath, systemId='designsafe.storage.default'
)
# %%
if len(files) <= 1:
- print('No files in the archive')
+ print('No files in the archive') # noqa: T201
else:
for file in files:
filename = file['name']
@@ -121,6 +121,6 @@ def Submit_tapis_job():
res = ag.files.download(
filePath=path, systemId='designsafe.storage.default'
)
- with open(f'{savingDirectory}/{filename}', 'wb') as f:
+ with open(f'{savingDirectory}/{filename}', 'wb') as f: # noqa: PTH123
f.write(res.content)
# %%
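
Both Istanbul submitters stamp the job name via `datetime.now()`, hence the DTZ005 waivers. A sketch of a timezone-aware equivalent producing the same `%Y%m%d%H%M%S` suffix; the `username` value is a stand-in for the tapis profile lookup:

```python
from datetime import datetime, timezone

username = 'someuser'  # stand-in for the tapis profile lookup

# An aware datetime satisfies DTZ005; UTC keeps job names comparable
# across submitting machines.
timestamp = datetime.now(timezone.utc).strftime('%Y%m%d%H%M%S')
jobname = f'PhysicsBasedMotion_Istanbul_{username}_{timestamp}'
print(jobname)
```
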
diff --git a/modules/createEVENT/Istanbul/IstanbulRun.py b/modules/createEVENT/Istanbul/IstanbulRun.py
index 1142a548b..100e91db9 100644
--- a/modules/createEVENT/Istanbul/IstanbulRun.py
+++ b/modules/createEVENT/Istanbul/IstanbulRun.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import os
import IstanbulApp2
@@ -43,7 +43,7 @@
information['number_of_realizations'] = int(args.number)
 # change the working directory to this script's location
- os.chdir(os.path.dirname(os.path.realpath(__file__)))
+ os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
IstanbulStations.getStations(information, plot=False, show=False)
IstanbulApp2.Submit_tapis_job()
- exit()
+ exit() # noqa: PLR1722
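
PLR1722 flags the bare `exit()` because it is injected by the `site` module and is not guaranteed to exist in every interpreter, and PTH120 flags the nested `os.path` calls. A sketch of the runner's tail with both addressed:

```python
import os
import sys
from pathlib import Path

# PTH120: Path(__file__).resolve().parent is the pathlib spelling of
# os.path.dirname(os.path.realpath(__file__)).
os.chdir(Path(__file__).resolve().parent)

# ... IstanbulStations.getStations() / IstanbulApp2.Submit_tapis_job() ...

# PLR1722: sys.exit() is always available, unlike the site-module exit().
sys.exit()
```
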
diff --git a/modules/createEVENT/Istanbul/IstanbulStations.py b/modules/createEVENT/Istanbul/IstanbulStations.py
index 8d7463a0d..3fcd70882 100644
--- a/modules/createEVENT/Istanbul/IstanbulStations.py
+++ b/modules/createEVENT/Istanbul/IstanbulStations.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import math
import os
@@ -7,10 +7,10 @@
from shapely.geometry import Point, Polygon
-def getStations(information, plot=False, show=False):
- """This function is used to retrieve the information of the Istanbul physics-based simulations"""
- RegionFlag = information['RegionFlag']
- LocationFlag = information['LocationFlag']
+def getStations(information, plot=False, show=False): # noqa: FBT002, C901, N802
+ """This function is used to retrieve the information of the Istanbul physics-based simulations""" # noqa: D400, D401, D404
+ RegionFlag = information['RegionFlag'] # noqa: N806
+ LocationFlag = information['LocationFlag'] # noqa: N806
if LocationFlag:
# get the location of the site
@@ -32,10 +32,10 @@ def getStations(information, plot=False, show=False):
radius = information['radius']
 # Read the data from the csv file, ignoring the index
- df_allSites = pd.read_csv(
+ df_allSites = pd.read_csv( # noqa: N806
'All_Stations_Lat_Lon_Vs30_BedrockDepth.csv', index_col=False
)
- df_allSites = df_allSites[['Longitude', 'Latitude', 'Depth (m)']]
+ df_allSites = df_allSites[['Longitude', 'Latitude', 'Depth (m)']] # noqa: N806
 # add geometry using Longitude and Latitude
gdf = gpd.GeoDataFrame(
df_allSites,
@@ -52,12 +52,12 @@ def getStations(information, plot=False, show=False):
del df_allSites
directory = information['directory'] # directory to save the data
# create the directory if it does not exist
- if not os.path.exists(directory):
- os.makedirs(directory)
+ if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
# empty the directory
files = os.listdir(directory)
for file in files:
- os.remove(directory + '/' + file)
+ os.remove(directory + '/' + file) # noqa: PTH107
if LocationFlag:
# find the nearest site to the location
@@ -71,7 +71,7 @@ def getStations(information, plot=False, show=False):
if RegionFlag:
if information['RegionShape'] == 'Rectangle':
 # Create a polygon using min_lat, max_lat, min_lon, max_lon
- RegionofInterset = Polygon(
+ RegionofInterset = Polygon( # noqa: N806
[
(min_lon, min_lat),
(min_lon, max_lat),
@@ -88,7 +88,7 @@ def getStations(information, plot=False, show=False):
# check if the gdf is empty
if withinindicies.sum() == 0:
- print(
+ print( # noqa: T201
 'No sites were found in the selected region; please change the region of interest'
)
return
@@ -169,13 +169,13 @@ def getStations(information, plot=False, show=False):
gdf.drop(columns=['geometry', 'Color', 'Selected Site']).to_csv(
'TapisFiles/selectedSites.csv', index=True
)
- json.dump(information, open('TapisFiles/information.json', 'w'), indent=2)
+ json.dump(information, open('TapisFiles/information.json', 'w'), indent=2) # noqa: SIM115, PTH123
def haversine(lat1, lon1, lat2, lon2):
"""Calculate the great circle distance between two points
on the earth specified in decimal degrees.
- """
+ """ # noqa: D205
# Convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
@@ -190,7 +190,7 @@ def haversine(lat1, lon1, lat2, lon2):
r = 6371 # Radius of the Earth in kilometers
distance = r * c
- return distance
+ return distance # noqa: RET504
if __name__ == '__main__':
@@ -209,5 +209,5 @@ def haversine(lat1, lon1, lat2, lon2):
}
 # change the working directory to this script's location
- os.chdir(os.path.dirname(os.path.realpath(__file__)))
+ os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
getStations(information, plot=False, show=False)
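
The haversine helper appears here only in fragments (the radian conversion and the `return distance` that draws RET504). For reference, a standard implementation consistent with those fragments, returning the expression directly so no RET504 waiver is needed:

```python
import math


def haversine(lat1, lon1, lat2, lon2):
    """Great-circle distance in kilometers between two decimal-degree points."""
    lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
    a = (
        math.sin((lat2 - lat1) / 2) ** 2
        + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2
    )
    # 6371 km is the Earth radius used in the original; returning the
    # expression directly avoids the intermediate `distance` (RET504).
    return 6371 * 2 * math.asin(math.sqrt(a))


# Roughly the span of the Bosphorus, about 20 km:
print(f'{haversine(41.0, 28.9, 41.1, 29.1):.1f} km')
```
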
diff --git a/modules/createEVENT/Istanbul/TapisFiles/Istanbul.py b/modules/createEVENT/Istanbul/TapisFiles/Istanbul.py
index 1570006ad..10959e9fa 100644
--- a/modules/createEVENT/Istanbul/TapisFiles/Istanbul.py
+++ b/modules/createEVENT/Istanbul/TapisFiles/Istanbul.py
@@ -1,29 +1,29 @@
-# %%
+# %% # noqa: INP001, D100
import os
directory = './Events'
# check if the directory exists
-if not os.path.exists(directory):
- os.makedirs(directory)
-import json
+if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
+import json # noqa: E402
-import numpy as np
-import pandas as pd
+import numpy as np # noqa: E402
+import pandas as pd # noqa: E402
-def Istanbul(information):
- TopoFlag = information['TopoFlag']
- LocationFlag = information['LocationFlag']
- numSiteGM = information['number_of_realizations']
+def Istanbul(information): # noqa: N802, D103
+ TopoFlag = information['TopoFlag'] # noqa: N806
+ LocationFlag = information['LocationFlag'] # noqa: N806
+ numSiteGM = information['number_of_realizations'] # noqa: N806
 - randomFLag = True # if True, the realizations are selected randomly; otherwise, the first numSiteGM sites are selected
- maxnumSiteGM = 57
- numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations
 + randomFLag = True # if True, the realizations are selected randomly; otherwise, the first numSiteGM sites are selected # noqa: N806
+ maxnumSiteGM = 57 # noqa: N806
+ numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations # noqa: N806
directory = './Events'
# check if the directory exists
- if not os.path.exists(directory):
- os.makedirs(directory)
+ if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
# changing realizations order
indices = list(range(1, maxnumSiteGM + 1))
@@ -35,21 +35,21 @@ def Istanbul(information):
 if TopoFlag:
# IstanbulDirectory = '/corral-repl/projects/NHERI/published/PRJ-3712/GM_data/GM_topo/'
- IstanbulDirectory = '/home/jovyan/work/projects/PRJ-3712/GM_data/GM_topo/'
+ IstanbulDirectory = '/home/jovyan/work/projects/PRJ-3712/GM_data/GM_topo/' # noqa: N806
else:
# IstanbulDirectory = '/corral-repl/projects/NHERI/published/PRJ-3712/GM_data/GM_flat/'
- IstanbulDirectory = '/home/jovyan/work/projects/PRJ-3712/GM_data/GM_flat/'
+ IstanbulDirectory = '/home/jovyan/work/projects/PRJ-3712/GM_data/GM_flat/' # noqa: N806
 # print the number of sites
- print(f'Number of sites: {len(gdf)}')
+ print(f'Number of sites: {len(gdf)}') # noqa: T201
for realization in indices:
# load the data frame from the hdf file
if TopoFlag:
- df = pd.HDFStore(
+ df = pd.HDFStore( # noqa: PD901
f'{IstanbulDirectory}/Istanbul_sim{realization}.hdf5', 'r'
)
else:
- df = pd.HDFStore(
+ df = pd.HDFStore( # noqa: PD901
f'{IstanbulDirectory}/Istanbul_sim{realization}_flat.hdf5', 'r'
)
@@ -79,13 +79,13 @@ def Istanbul(information):
)
-def write_motion(site_name, directory, i, motiondict):
+def write_motion(site_name, directory, i, motiondict): # noqa: D103
filename = f'{directory}/site_{site_name}_{i}.json'
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(motiondict, f, indent=2)
# get the location flag
-with open('information.json') as file:
+with open('information.json') as file: # noqa: PTH123
information = json.load(file)
Istanbul(information)
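
This module creates `./Events` in between its imports, which is why the later imports need E402 waivers. A sketch of the prologue using pathlib, which keeps the imports together and also drops the PTH110/PTH103 pair:

```python
from pathlib import Path

# mkdir(parents=True, exist_ok=True) is idempotent, so no exists() check
# (PTH110) or makedirs() call (PTH103) is needed, and the directory setup
# can follow the imports instead of interrupting them (E402).
Path('./Events').mkdir(parents=True, exist_ok=True)
```
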
diff --git a/modules/createEVENT/LLNL_SW4/LLNL_SW4.py b/modules/createEVENT/LLNL_SW4/LLNL_SW4.py
index ac1ef4b6c..2deb35859 100644
--- a/modules/createEVENT/LLNL_SW4/LLNL_SW4.py
+++ b/modules/createEVENT/LLNL_SW4/LLNL_SW4.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import posixpath
import sys
@@ -6,8 +6,8 @@
import numpy as np
-def write_RV(BIM_file, EVENT_file, data_dir):
- with open(BIM_file) as f:
+def write_RV(BIM_file, EVENT_file, data_dir): # noqa: N802, N803, D103
+ with open(BIM_file) as f: # noqa: PTH123
bim_data = json.load(f)
event_file = {'randomVariables': [], 'Events': []}
@@ -35,10 +35,10 @@ def write_RV(BIM_file, EVENT_file, data_dir):
}
)
- RV_elements = []
+ RV_elements = [] # noqa: N806
for event in events:
if event['EventClassification'] == 'Earthquake':
- RV_elements.append(event['fileName'])
+ RV_elements.append(event['fileName']) # noqa: PERF401
event_file['randomVariables'][0]['elements'] = RV_elements
@@ -47,14 +47,14 @@ def write_RV(BIM_file, EVENT_file, data_dir):
load_record(events[0]['fileName'], data_dir, empty=True)
)
- with open(EVENT_file, 'w') as f:
+ with open(EVENT_file, 'w') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
-def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
- fileName = fileName.split('x')[0]
+def load_record(fileName, data_dir, scale_factor=1.0, empty=False): # noqa: FBT002, N803, D103
+ fileName = fileName.split('x')[0] # noqa: N806
- with open(posixpath.join(data_dir, f'{fileName}.json')) as f:
+ with open(posixpath.join(data_dir, f'{fileName}.json')) as f: # noqa: PTH123
event_data = json.load(f)
event_dic = {
@@ -69,7 +69,7 @@ def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
for i, (src_label, tar_label) in enumerate(
zip(['data_x', 'data_y'], ['accel_X', 'accel_Y'])
):
- if src_label in event_data.keys():
+ if src_label in event_data.keys(): # noqa: SIM118
event_dic['timeSeries'].append(
{
'name': tar_label,
@@ -89,16 +89,16 @@ def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
return event_dic
-def get_records(BIM_file, EVENT_file, data_dir):
- with open(BIM_file) as f:
+def get_records(BIM_file, EVENT_file, data_dir): # noqa: N803, D103
+ with open(BIM_file) as f: # noqa: PTH123
bim_file = json.load(f)
- with open(EVENT_file) as f:
+ with open(EVENT_file) as f: # noqa: PTH123
event_file = json.load(f)
event_id = event_file['Events'][0]['event_id']
- scale_factor = dict(
+ scale_factor = dict( # noqa: C404
[
(evt['fileName'], evt.get('factor', 1.0))
for evt in bim_file['Events']['Events']
@@ -107,7 +107,7 @@ def get_records(BIM_file, EVENT_file, data_dir):
event_file['Events'][0].update(load_record(event_id, data_dir, scale_factor))
- with open(EVENT_file, 'w') as f:
+ with open(EVENT_file, 'w') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
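
Two of the waivers in LLNL_SW4.py point at patterns with direct comprehension/membership equivalents: C404 (a `dict([...])` wrapped around a list comprehension) and SIM118 (`in d.keys()`). A sketch with toy data shaped like the JSON this module loads:

```python
# Toy stand-ins for the structures loaded in get_records()/load_record().
bim_file = {'Events': {'Events': [{'fileName': 'RSN100x', 'factor': 1.4}]}}
event_data = {'data_x': [0.0, 0.1], 'dT': 0.01}

# C404: build the mapping with a dict comprehension instead of dict([...]).
scale_factor = {
    evt['fileName']: evt.get('factor', 1.0)
    for evt in bim_file['Events']['Events']
}

# SIM118: membership tests work on the mapping itself; .keys() is redundant.
if 'data_x' in event_data:
    print(scale_factor)
```
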
diff --git a/modules/createEVENT/M9/M9API.py b/modules/createEVENT/M9/M9API.py
index 0f231b2dc..52702a163 100644
--- a/modules/createEVENT/M9/M9API.py
+++ b/modules/createEVENT/M9/M9API.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
import subprocess
import sys
@@ -11,9 +11,9 @@
modules_reqd = {'numpy', 'pandas', 'geopandas', 'shapely', 'requests', 'argparse'}
modules_installed = set()
for x in importlib_metadata.distributions():
- try:
+ try: # noqa: SIM105
modules_installed.add(x.name)
- except:
+ except: # noqa: S110, PERF203, E722
pass
# If installed packages could not be detected, use importlib_metadata backport:
@@ -21,43 +21,43 @@
import importlib_metadata
for x in importlib_metadata.distributions():
- try:
+ try: # noqa: SIM105
modules_installed.add(x.name)
- except:
+ except: # noqa: S110, PERF203, E722
pass
missing = modules_reqd - modules_installed
if missing:
python = sys.executable
- print('\nInstalling packages required for running this widget...')
- subprocess.check_call(
+ print('\nInstalling packages required for running this widget...') # noqa: T201
+ subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', '--user', *missing],
stdout=subprocess.DEVNULL,
)
- print('Successfully installed the required packages')
+ print('Successfully installed the required packages') # noqa: T201
#
# now import our packages
#
-import json
-import math
-import os
+import json # noqa: E402
+import math # noqa: E402
+import os # noqa: E402
-import geopandas as gpd
-import numpy as np
-import pandas as pd
-import requests
-from shapely.geometry import Point, Polygon
+import geopandas as gpd # noqa: E402
+import numpy as np # noqa: E402
+import pandas as pd # noqa: E402
+import requests # noqa: E402
+from shapely.geometry import Point, Polygon # noqa: E402
# %%
-def M9(information):
+def M9(information): # noqa: C901, N802
"""The default is to select sites from all M9 sites, but
grid type (options: A, B, C, D, E, Y, and Z, can be empty)
(ref: https://sites.uw.edu/pnet/m9-simulations/about-m9-simulations/extent-of-model/)
- """
+ """ # noqa: D205, D400, D401
site_location = information['LocationFlag']
if site_location:
@@ -84,12 +84,12 @@ def M9(information):
'grid_type'
] # grid type (options: A, B, C, D, E, Y, and Z, can be "all")
 - randomFLag = True # if True, the realizations are selected randomly; otherwise, the first numSiteGM sites are selected
- numSiteGM = information[
 + randomFLag = True # if True, the realizations are selected randomly; otherwise, the first numSiteGM sites are selected # noqa: N806
+ numSiteGM = information[ # noqa: N806
'number_of_realizations'
] # number of realizations
- maxnumSiteGM = 30
- numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations
+ maxnumSiteGM = 30 # noqa: N806
+ numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations # noqa: N806
# changing realizations order
indices = list(range(maxnumSiteGM))
@@ -99,18 +99,18 @@ def M9(information):
directory = information['directory'] # directory to save the data
# create the directory if it does not exist
- if not os.path.exists(directory):
- os.makedirs(directory)
+ if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
# remove the files in the directory
# os.system(f'rm -r {directory}/*')
# load the sites information
- path_script = os.path.dirname(os.path.abspath(__file__))
+ path_script = os.path.dirname(os.path.abspath(__file__)) # noqa: PTH100, PTH120
path_site_file = path_script + '/M9_sites.csv'
- print(path_site_file)
- df_allSites = pd.read_csv(path_site_file, index_col=False)
+ print(path_site_file) # noqa: T201
+ df_allSites = pd.read_csv(path_site_file, index_col=False) # noqa: N806
# create a geopandas dataframe
gdf = gpd.GeoDataFrame(
@@ -122,7 +122,7 @@ def M9(information):
del df_allSites
# limitation of each grid type (minx, miny, maxx, maxy)
- Gridboxes = {
+ Gridboxes = { # noqa: N806
'A': (-123.2147269, 46.90566609, -121.1246222, 48.31489086),
'B': (-128.4741831, 40.26059707, -121.0785236, 49.1785082),
'C': (-123.2568915, 45.19862425, -122.2252305, 45.92126901),
@@ -152,12 +152,12 @@ def M9(information):
if site_location:
 # first check whether the location lies within the region
if not region.contains(Point(lon, lat)):
- print('The location is not in the selected grid region')
- print(
+ print('The location is not in the selected grid region') # noqa: T201
+ print( # noqa: T201
'Please select a location in the region or change the grid type to "All"'
)
return
- else:
+ else: # noqa: RET505
# find the nearest site to the location
gdf['distance'] = gdf.distance(Point(lon, lat))
gdf = gdf.sort_values('distance')
@@ -168,7 +168,7 @@ def M9(information):
if information['RegionShape'] == 'Rectangle':
 # Create a polygon using min_lat, max_lat, min_lon, max_lon
- RegionofInterset = Polygon(
+ RegionofInterset = Polygon( # noqa: N806
[
(min_lon, min_lat),
(min_lon, max_lat),
@@ -179,23 +179,23 @@ def M9(information):
 # Check whether the RegionofInterset and the grid region intersect
if not region.intersects(RegionofInterset):
- print('The selected region is not in the selected grid region')
- print(
+ print('The selected region is not in the selected grid region') # noqa: T201
+ print( # noqa: T201
 'Please select a region inside the grid region or change the grid type to "All"'
)
return
- else:
+ else: # noqa: RET505
# Check if the RegionofInterset is in the region
if not region.contains(RegionofInterset):
- print(
+ print( # noqa: T201
'The selected region is not entirely in the selected grid region'
)
- print(
+ print( # noqa: T201
'The selected region will be changed to the intersection of the selected region and the grid region'
)
- RegionofInterset = region.intersection(RegionofInterset)
+ RegionofInterset = region.intersection(RegionofInterset) # noqa: N806
else:
- print(
+ print( # noqa: T201
'The selected region is entirely in the selected grid region'
)
# now filter the sites that are in the regionofInterset
@@ -217,31 +217,31 @@ def M9(information):
]
gdf = gdf[gdf['distance'] < radius]
- APIFLAG = information[
+ APIFLAG = information[ # noqa: N806
'APIFLAG'
] # if the APIFLAG is True, we use M9 API to get the motion data
if APIFLAG:
# query flags
- ResponseSpectra = True
+ ResponseSpectra = True # noqa: N806
# get the motion data from the API
for _, site in gdf.iterrows():
# get the motion data from the API
site_name = site['Station Name']
- jobURL = f'https://m9-broadband-download-rwqks6gbba-uc.a.run.app/getMotionFromStationName?StationName={site_name}&ResponseSpectra={ResponseSpectra}'
+ jobURL = f'https://m9-broadband-download-rwqks6gbba-uc.a.run.app/getMotionFromStationName?StationName={site_name}&ResponseSpectra={ResponseSpectra}' # noqa: N806
res_success = False
iter_num = 0
max_iter = 5
- print(f'Getting the motion data for {site_name}')
+ print(f'Getting the motion data for {site_name}') # noqa: T201
while not (res_success) and (iter_num < max_iter):
- res = requests.get(jobURL)
- res_success = res.status_code == 200
+ res = requests.get(jobURL) # noqa: S113
+ res_success = res.status_code == 200 # noqa: PLR2004
iter_num = iter_num + 1
if res_success:
- gmData = res.json()
+ gmData = res.json() # noqa: N806
for i in indices:
write_motion(site_name, directory, i, gmData[i], APIFLAG)
gdf['filename'] = f'{site_name}_{i}'
@@ -249,13 +249,13 @@ def M9(information):
if site_location:
break
else:
 - print(f'No reply from the URL for {site_name}, skipping for now')
 + print(f'No reply from the URL for {site_name}, skipping for now') # noqa: T201
if site_location:
- print('trying the next nearest site')
+ print('trying the next nearest site') # noqa: T201
if site_location and not (res_success):
- print('None of the nearest sites have motion data')
- print('Please check your internet connection or try again later')
+ print('None of the nearest sites have motion data') # noqa: T201
+ print('Please check your internet connection or try again later') # noqa: T201
if not (APIFLAG):
indices = ['030']
@@ -265,19 +265,19 @@ def M9(information):
site_name = site['Station Name']
lat = site['Latitude']
lon = site['Longitude']
- firstLetter = site_name[0]
+ firstLetter = site_name[0] # noqa: N806
filename = f'./csz{indices[0]}/{firstLetter}/Xarray.nc'
# reading the nc file
- data = xr.open_dataset(filename)
+ data = xr.open_dataset(filename) # noqa: F821
subset = data.sel(lat=lat, lon=lon, method='nearest')
- dt = data.coords['time'].values
+ dt = data.coords['time'].values # noqa: PD011
dt = dt[1] - dt[0]
sitedata = {
'dT': dt,
- 'accel_x': subset['acc_x'].values.tolist(),
- 'accel_y': subset['acc_y'].values.tolist(),
- 'accel_z': subset['acc_z'].values.tolist(),
+ 'accel_x': subset['acc_x'].values.tolist(), # noqa: PD011
+ 'accel_y': subset['acc_y'].values.tolist(), # noqa: PD011
+ 'accel_z': subset['acc_z'].values.tolist(), # noqa: PD011
}
write_motion(site_name, directory, i, sitedata, APIFLAG)
gdf['filename'] = f'{site_name}_{i}'
@@ -288,7 +288,7 @@ def M9(information):
)
-def write_motion(site_name, directory, i, motiondict, APIFLAG):
+def write_motion(site_name, directory, i, motiondict, APIFLAG): # noqa: N803, D103
filename = f'{directory}/{site_name}_{i}.json'
if APIFLAG:
@@ -310,14 +310,14 @@ def write_motion(site_name, directory, i, motiondict, APIFLAG):
datatowrite['Data'] = 'Time history generated using M9 simulations'
datatowrite['name'] = f'{site_name}_{i}'
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(datatowrite, f, indent=2)
def haversine(lat1, lon1, lat2, lon2):
"""Calculate the great circle distance between two points
on the earth specified in decimal degrees.
- """
+ """ # noqa: D205
# Convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
@@ -332,4 +332,4 @@ def haversine(lat1, lon1, lat2, lon2):
r = 6371 # Radius of the Earth in kilometers
distance = r * c
- return distance
+ return distance # noqa: RET504
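
A note on the S113 suppressions above: they mark `requests.get()` calls issued without a timeout, so a hung connection can stall the retry loop indefinitely. A minimal sketch of the same bounded-retry fetch with an explicit per-attempt timeout (the URL template and the 200/`max_iter` logic mirror the diff; the helper name is hypothetical):

    import requests

    def fetch_motion(site_name, response_spectra=True, max_iter=5, timeout=30):
        """Query the M9 download service, retrying up to max_iter times."""
        url = (
            'https://m9-broadband-download-rwqks6gbba-uc.a.run.app/'
            f'getMotionFromStationName?StationName={site_name}'
            f'&ResponseSpectra={response_spectra}'
        )
        for _ in range(max_iter):
            try:
                res = requests.get(url, timeout=timeout)  # bounded wait per attempt
            except requests.RequestException:
                continue  # transient network error: try again
            if res.status_code == 200:
                return res.json()
        return None  # caller decides how to handle a site with no data
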
diff --git a/modules/createEVENT/M9/M9App.py b/modules/createEVENT/M9/M9App.py
index 1e2f5f9d6..2a2663531 100644
--- a/modules/createEVENT/M9/M9App.py
+++ b/modules/createEVENT/M9/M9App.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
import json
import os
import time
@@ -6,35 +6,35 @@
from subprocess import PIPE, run
# change the working directory to this script's directory
-os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
# %%
# helper function to call the tapis command
-def call(command):
+def call(command): # noqa: D103
command = command.split()
command.append('-f')
command.append('json')
- result = run(command, stdout=PIPE, stderr=PIPE, text=True, check=False)
+ result = run(command, stdout=PIPE, stderr=PIPE, text=True, check=False) # noqa: S603, UP022
result = json.loads(result.stdout)
- return result
+ return result # noqa: RET504
# %%
-def Submit_tapis_job():
- with open('TapisFiles/information.json') as file:
+def Submit_tapis_job(): # noqa: N802, D103
+ with open('TapisFiles/information.json') as file: # noqa: PTH123
information = json.load(file)
file.close()
profile = call('tapis profiles show self')
username = profile['username']
email = profile['email']
- savingDirectory = information['directory']
+ savingDirectory = information['directory'] # noqa: N806
- if not os.path.exists(savingDirectory):
- os.makedirs(savingDirectory)
+ if not os.path.exists(savingDirectory): # noqa: PTH110
+ os.makedirs(savingDirectory) # noqa: PTH103
- print('Uploading files to designsafe storage')
+ print('Uploading files to designsafe storage') # noqa: T201
call(
f'tapis files mkdir agave://designsafe.storage.default/{username}/ physics_based'
)
@@ -68,10 +68,10 @@ def Submit_tapis_job():
}
# Generate a timestamp to append to the job name
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S') # noqa: DTZ005
jobname = f'PhysicsBasedMotion_M9_{username}_{timestamp}'
- print('Submitting job')
+ print('Submitting job') # noqa: T201
jobdict['name'] = jobname
jobdict['inputs']['inputDirectory'] = (
f'agave://designsafe.storage.default/{username}/physics_based/M9/'
@@ -80,7 +80,7 @@ def Submit_tapis_job():
# submit the job
jobfile = './TapisFiles/job.json'
- json.dump(jobdict, open(jobfile, 'w'), indent=2)
+ json.dump(jobdict, open(jobfile, 'w'), indent=2) # noqa: SIM115, PTH123
res = call(f'tapis jobs submit -F {jobfile}')
# delete the job file
@@ -95,30 +95,30 @@ def Submit_tapis_job():
status = call(f'tapis jobs status {jobid} ')['status']
if count == 0:
last_status = status
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
count += 1
if last_status != status:
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
last_status = status
if status == 'FAILED':
- print('Job failed')
+ print('Job failed') # noqa: T201
break
time.sleep(10)
# # %%
# # %%
- print('Downloading extracted motions')
- archivePath = call(f'tapis jobs show {jobid}')['archivePath']
- archivePath = f'agave://designsafe.storage.default/{archivePath}/M9'
+ print('Downloading extracted motions') # noqa: T201
+ archivePath = call(f'tapis jobs show {jobid}')['archivePath'] # noqa: N806
+ archivePath = f'agave://designsafe.storage.default/{archivePath}/M9' # noqa: N806
files = call(f'tapis files list {archivePath}/Events/')
if len(files) == 0:
- print('No files in the archive')
+ print('No files in the archive') # noqa: T201
else:
command = f'tapis files download {archivePath}/Events/ -W {savingDirectory}/'
command = command.split()
- run(command, stdout=PIPE, stderr=PIPE, text=True, check=False)
+ run(command, stdout=PIPE, stderr=PIPE, text=True, check=False) # noqa: S603, UP022
return res
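
The UP022 suppressions on both `run(...)` calls above flag the `stdout=PIPE, stderr=PIPE` pair that `capture_output=True` replaces. A sketch of the `call` helper in that modernized form, keeping the same `-f json` convention as the diff:

    import json
    import subprocess

    def call(command: str) -> dict:
        """Run a Tapis CLI command and parse its JSON output."""
        args = [*command.split(), '-f', 'json']
        result = subprocess.run(args, capture_output=True, text=True, check=False)
        return json.loads(result.stdout)
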
diff --git a/modules/createEVENT/M9/M9App2.py b/modules/createEVENT/M9/M9App2.py
index 878d479b7..038945a13 100644
--- a/modules/createEVENT/M9/M9App2.py
+++ b/modules/createEVENT/M9/M9App2.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
import json
import os
import time
@@ -7,23 +7,23 @@
from agavepy.agave import Agave
# change the working directory to this script's directory
-os.chdir(os.path.dirname(os.path.realpath(__file__)))
+os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
-def Submit_tapis_job():
+def Submit_tapis_job(): # noqa: N802, D103
ag = Agave.restore()
- with open('TapisFiles/information.json') as file:
+ with open('TapisFiles/information.json') as file: # noqa: PTH123
information = json.load(file)
file.close()
# %%
profile = ag.profiles.get()
username = profile['username']
- savingDirectory = information['directory']
- if not os.path.exists(savingDirectory):
- os.makedirs(savingDirectory)
+ savingDirectory = information['directory'] # noqa: N806
+ if not os.path.exists(savingDirectory): # noqa: PTH110
+ os.makedirs(savingDirectory) # noqa: PTH103
- print('Uploading files to designsafe storage')
+ print('Uploading files to designsafe storage') # noqa: T201
ag.files.manage(
systemId='designsafe.storage.default',
filePath=f'{username}/',
@@ -35,20 +35,20 @@ def Submit_tapis_job():
body={'action': 'mkdir', 'path': 'M9'},
)
# ag.files_mkdir(systemId="designsafe.storage.default", filePath=f"{username}/physics_based/Istanbul2")
- with open('TapisFiles/M9.py', 'rb') as file:
+ with open('TapisFiles/M9.py', 'rb') as file: # noqa: PTH123
result = ag.files.importData(
filePath=f'{username}/physics_based/M9/',
fileToUpload=file,
systemId='designsafe.storage.default',
)
- with open('TapisFiles/information.json', 'rb') as file:
+ with open('TapisFiles/information.json', 'rb') as file: # noqa: PTH123
result = ag.files.importData(
filePath=f'{username}/physics_based/M9/',
fileToUpload=file,
systemId='designsafe.storage.default',
)
- with open('TapisFiles/selectedSites.csv', 'rb') as file:
- result = ag.files.importData(
+ with open('TapisFiles/selectedSites.csv', 'rb') as file: # noqa: PTH123
+ result = ag.files.importData( # noqa: F841
filePath=f'{username}/physics_based/M9/',
fileToUpload=file,
systemId='designsafe.storage.default',
@@ -70,10 +70,10 @@ def Submit_tapis_job():
}
# Generate a timestamp to append to the job name
- timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
+ timestamp = datetime.now().strftime('%Y%m%d%H%M%S') # noqa: DTZ005
jobname = f'PhysicsBasedMotion_M9_{username}_{timestamp}'
- print('Submitting job')
+ print('Submitting job') # noqa: T201
# submit the job
jobdict['name'] = jobname
jobdict['inputs']['inputDirectory'] = (
@@ -90,28 +90,28 @@ def Submit_tapis_job():
status = ag.jobs.getStatus(jobId=jobid)['status']
if count == 0:
last_status = status
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
count += 1
if last_status != status:
- print('Job status: ', status)
+ print('Job status: ', status) # noqa: T201
last_status = status
if status == 'FAILED':
- print('Job failed')
+ print('Job failed') # noqa: T201
break
time.sleep(10)
# %%
- print('Downloading extracted motions')
- archivePath = ag.jobs.get(jobId=jobid)['archivePath']
- archivePath = f'{archivePath}/M9/Events/'
+ print('Downloading extracted motions') # noqa: T201
+ archivePath = ag.jobs.get(jobId=jobid)['archivePath'] # noqa: N806
+ archivePath = f'{archivePath}/M9/Events/' # noqa: N806
files = ag.files.list(
filePath=archivePath, systemId='designsafe.storage.default'
)
# %%
if len(files) <= 1:
- print('No files in the archive')
+ print('No files in the archive') # noqa: T201
else:
for file in files:
filename = file['name']
@@ -121,6 +121,6 @@ def Submit_tapis_job():
res = ag.files.download(
filePath=path, systemId='designsafe.storage.default'
)
- with open(f'{savingDirectory}/{filename}', 'wb') as f:
+ with open(f'{savingDirectory}/{filename}', 'wb') as f: # noqa: PTH123
f.write(res.content)
# %%
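
Both submitters poll job status the same way: print the status once, then only on transitions, and stop on FAILED. Factored out, the loop could look like the sketch below; `get_status` stands in for `ag.jobs.getStatus(jobId=...)['status']` or the `tapis jobs status` call, and treating FINISHED as terminal is an assumption, since the diff only shows the FAILED branch.

    import time

    def wait_for_job(get_status, poll_interval=10, terminal=('FINISHED', 'FAILED')):
        """Poll get_status() until the job reaches a terminal state."""
        last_status = None
        while True:
            status = get_status()
            if status != last_status:  # report only transitions
                print('Job status: ', status)
                last_status = status
            if status in terminal:  # FINISHED assumed terminal; FAILED is, per the diff
                return status
            time.sleep(poll_interval)
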
diff --git a/modules/createEVENT/M9/M9Run.py b/modules/createEVENT/M9/M9Run.py
index 81edb6522..1525b2094 100644
--- a/modules/createEVENT/M9/M9Run.py
+++ b/modules/createEVENT/M9/M9Run.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import os
import M9API
@@ -56,13 +56,13 @@
#
# go get the motions
#
- os.chdir(os.path.dirname(os.path.realpath(__file__)))
+ os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
if information['APIFLAG']:
- print(
+ print( # noqa: T201
'Using API for extracting motions:\n This may take a while. Please be patient.'
)
M9API.M9(information)
else:
M9Stations.getStations(information, plot=False, show=False)
M9App2.Submit_tapis_job()
- exit()
+ exit() # noqa: PLR1722
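
Two of the suppressions in this driver point at one-line modernizations: PTH120 (build paths with pathlib rather than os.path) and PLR1722 (call sys.exit() rather than the site-injected exit()). A sketch of both:

    import os
    import sys
    from pathlib import Path

    os.chdir(Path(__file__).resolve().parent)  # pathlib equivalent of the PTH120 pattern
    sys.exit()  # exits like exit(), but does not depend on the site module
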
diff --git a/modules/createEVENT/M9/M9Stations.py b/modules/createEVENT/M9/M9Stations.py
index 0b3d9dbea..1c5ccf699 100644
--- a/modules/createEVENT/M9/M9Stations.py
+++ b/modules/createEVENT/M9/M9Stations.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
# required libraries: numpy, geopandas, pandas, plotly
import json
import math
@@ -8,9 +8,9 @@
from shapely.geometry import Point, Polygon
-def getStations(information, plot=False, show=False):
- RegionFlag = information['RegionFlag']
- LocationFlag = information['LocationFlag']
+def getStations(information, plot=False, show=False): # noqa: FBT002, C901, N802, D103
+ RegionFlag = information['RegionFlag'] # noqa: N806
+ LocationFlag = information['LocationFlag'] # noqa: N806
if LocationFlag:
# get the location of the site
@@ -36,7 +36,7 @@ def getStations(information, plot=False, show=False):
] # grid type (options: A, B, C, D, E, Y, and Z, can be "all")
# load the sites information
- df_allSites = pd.read_csv('M9_sites.csv', index_col=False)
+ df_allSites = pd.read_csv('M9_sites.csv', index_col=False) # noqa: N806
# create a geopandas dataframe
gdf = gpd.GeoDataFrame(
@@ -48,7 +48,7 @@ def getStations(information, plot=False, show=False):
del df_allSites
# limits of each grid type (minx, miny, maxx, maxy)
- Gridboxes = {
+ Gridboxes = { # noqa: N806
'A': (-123.2147269, 46.90566609, -121.1246222, 48.31489086),
'B': (-128.4741831, 40.26059707, -121.0785236, 49.1785082),
'C': (-123.2568915, 45.19862425, -122.2252305, 45.92126901),
@@ -78,12 +78,12 @@ def getStations(information, plot=False, show=False):
if LocationFlag:
# first check whether the location is inside the region
if not region.contains(Point(lon, lat)):
- print('The location is not in the selected grid region')
- print(
+ print('The location is not in the selected grid region') # noqa: T201
+ print( # noqa: T201
'Please select a location in the region or change the grid type to "All"'
)
return
- else:
+ else: # noqa: RET505
# find the nearest site to the location
gdf['distance'] = gdf.distance(Point(lon, lat))
gdf = gdf.sort_values('distance')
@@ -94,7 +94,7 @@ def getStations(information, plot=False, show=False):
if RegionFlag:
if information['RegionShape'] == 'Rectangle':
# Create a polygton using min_lat, max_lat, min_lon, max_lon
- RegionofInterset = Polygon(
+ RegionofInterset = Polygon( # noqa: N806
[
(min_lon, min_lat),
(min_lon, max_lat),
@@ -105,23 +105,23 @@ def getStations(information, plot=False, show=False):
# Check whether the RegionofInterset and the region intersect
if not region.intersects(RegionofInterset):
- print('The selected region is not in the selected grid region')
- print(
+ print('The selected region is not in the selected grid region') # noqa: T201
+ print( # noqa: T201
'Please select a region inside the grid region or change the grid type to "All"'
)
return
- else:
+ else: # noqa: RET505
# Check if the RegionofInterset is in the region
if not region.contains(RegionofInterset):
- print(
+ print( # noqa: T201
'The selected region is not entirely in the selected grid region'
)
- print(
+ print( # noqa: T201
'The selected region will be changed to the intersection of the selected region and the grid region'
)
- RegionofInterset = region.intersection(RegionofInterset)
+ RegionofInterset = region.intersection(RegionofInterset) # noqa: N806
else:
- print(
+ print( # noqa: T201
'The selected region is entirely in the selected grid region'
)
# now filter the sites that are in the regionofInterset
@@ -207,14 +207,14 @@ def getStations(information, plot=False, show=False):
gdf.drop(columns=['geometry', 'Color', 'Selected Site']).to_csv(
'TapisFiles/selectedSites.csv', index=True
)
- json.dump(information, open('TapisFiles/information.json', 'w'), indent=2)
+ json.dump(information, open('TapisFiles/information.json', 'w'), indent=2) # noqa: SIM115, PTH123
# fig.show()
def haversine(lat1, lon1, lat2, lon2):
"""Calculate the great circle distance between two points
on the earth specified in decimal degrees.
- """
+ """ # noqa: D205
# Convert decimal degrees to radians
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
@@ -229,4 +229,4 @@ def haversine(lat1, lon1, lat2, lon2):
r = 6371 # Radius of the Earth in kilometers
distance = r * c
- return distance
+ return distance # noqa: RET504
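
For reference, the two identical `haversine` helpers in this PR implement the standard great-circle formula d = 2R * asin(sqrt(sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2))). A self-contained version with the same 6371 km Earth radius:

    import math

    def haversine(lat1, lon1, lat2, lon2):
        """Great-circle distance in kilometers between two (lat, lon) points."""
        lat1, lon1, lat2, lon2 = map(math.radians, (lat1, lon1, lat2, lon2))
        a = (
            math.sin((lat2 - lat1) / 2) ** 2
            + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2
        )
        return 2 * 6371 * math.asin(math.sqrt(a))  # r = 6371 km, as in the diff

    # e.g. Seattle to Portland comes out near 233 km:
    # haversine(47.6062, -122.3321, 45.5152, -122.6784)
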
diff --git a/modules/createEVENT/M9/TapisFiles/M9.py b/modules/createEVENT/M9/TapisFiles/M9.py
index 5b0d836ee..cb31eaacf 100644
--- a/modules/createEVENT/M9/TapisFiles/M9.py
+++ b/modules/createEVENT/M9/TapisFiles/M9.py
@@ -1,4 +1,4 @@
-# %%
+# %% # noqa: INP001, D100
import json
import os
@@ -9,38 +9,38 @@
# 'netcdf4', 'h5netcdf', 'scipy'
# %%
-def M9(information):
+def M9(information): # noqa: N802
"""The default is to select sites from all M9 sites, but
grid type (options: A, B, C, D, E, Y, and Z, can be empty)
(ref: https://sites.uw.edu/pnet/m9-simulations/about-m9-simulations/extent-of-model/)
- """
- LocationFlag = information['LocationFlag']
- numSiteGM = information['number_of_realizations']
- grid_type = information[
+ """ # noqa: D205, D400, D401
+ LocationFlag = information['LocationFlag'] # noqa: N806
+ numSiteGM = information['number_of_realizations'] # noqa: N806
+ grid_type = information[ # noqa: F841
'grid_type'
] # grid type (options: A, B, C, D, E, Y, and Z, can be "all")
- randomFLag = True # if True, the realizations are selected randomly, otherwise, the first numSiteGM sites are selected
- maxnumSiteGM = 30
- numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations
+ randomFLag = True # if True, the realizations are selected randomly, otherwise, the first numSiteGM sites are selected # noqa: N806
+ maxnumSiteGM = 30 # noqa: N806
+ numSiteGM = min(numSiteGM, maxnumSiteGM) # number of realizations # noqa: N806
# changing realizations order
# indices = list(range(maxnumSiteGM));
- Realizations = [f'{i:03}' for i in range(1, 33)]
+ Realizations = [f'{i:03}' for i in range(1, 33)] # noqa: N806
indices = np.arange(32)
if randomFLag:
np.random.shuffle(indices)
indices = indices[:numSiteGM]
- M9Path = '/home/jovyan/work/projects/PRJ-4603'
+ M9Path = '/home/jovyan/work/projects/PRJ-4603' # noqa: N806
directory = './Events' # directory to save the data
# create the directory if it does not exist
- if not os.path.exists(directory):
- os.makedirs(directory)
+ if not os.path.exists(directory): # noqa: PTH110
+ os.makedirs(directory) # noqa: PTH103
gdf = pd.read_csv('selectedSites.csv', index_col=0)
- APIFLAG = information[
+ APIFLAG = information[ # noqa: N806
'APIFLAG'
] # if the APIFLAG is True, we use M9 API to get the motion data
@@ -51,19 +51,19 @@ def M9(information):
site_name = site['Station Name']
lat = site['Latitude']
lon = site['Longitude']
- firstLetter = site_name[0]
+ firstLetter = site_name[0] # noqa: N806
filename = f'{M9Path}/csz{Realizations[i]}/{firstLetter}/Xarray.nc'
# reading the nc file
data = xr.open_dataset(filename)
subset = data.sel(lat=lat, lon=lon, method='nearest')
- dt = data.coords['time'].values
+ dt = data.coords['time'].values # noqa: PD011
dt = dt[1] - dt[0]
sitedata = {
'dT': dt,
- 'accel_x': subset['acc_x'].values.tolist(),
- 'accel_y': subset['acc_y'].values.tolist(),
- 'accel_z': subset['acc_z'].values.tolist(),
+ 'accel_x': subset['acc_x'].values.tolist(), # noqa: PD011
+ 'accel_y': subset['acc_y'].values.tolist(), # noqa: PD011
+ 'accel_z': subset['acc_z'].values.tolist(), # noqa: PD011
}
write_motion(site_name, directory, i, sitedata, APIFLAG)
gdf['filename'] = f'{site_name}_{i}'
@@ -79,7 +79,7 @@ def M9(information):
)
-def write_motion(site_name, directory, i, motiondict, APIFLAG):
+def write_motion(site_name, directory, i, motiondict, APIFLAG): # noqa: N803, D103
filename = f'{directory}/{site_name}_{i}.json'
if APIFLAG:
@@ -101,14 +101,14 @@ def write_motion(site_name, directory, i, motiondict, APIFLAG):
datatowrite['Data'] = 'Time history generated using M9 simulations'
datatowrite['name'] = f'{site_name}_{i}'
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(datatowrite, f, indent=2)
if __name__ == '__main__':
# change the directory to the directory of the current file
- os.chdir(os.path.dirname(os.path.abspath(__file__)))
+ os.chdir(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
- with open('information.json') as file:
+ with open('information.json') as file: # noqa: PTH123
information = json.load(file)
M9(information)
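
Worth noting: the PD011 suppressions in this file sit on xarray accessors, not pandas ones. In isolation, the extraction step the loop performs is the standard xarray nearest-neighbor pattern. A minimal sketch, assuming a NetCDF file laid out like the `Xarray.nc` files above (a `time` coordinate plus `acc_x`/`acc_y`/`acc_z` variables; the coordinates are hypothetical):

    import xarray as xr

    data = xr.open_dataset('Xarray.nc')  # hypothetical local copy of one realization
    subset = data.sel(lat=47.6, lon=-122.3, method='nearest')  # nearest grid point
    times = data.coords['time'].values
    sitedata = {
        'dT': times[1] - times[0],  # uniform time step assumed, as in the diff
        'accel_x': subset['acc_x'].values.tolist(),
        'accel_y': subset['acc_y'].values.tolist(),
        'accel_z': subset['acc_z'].values.tolist(),
    }
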
diff --git a/modules/createEVENT/MPM/MPM.py b/modules/createEVENT/MPM/MPM.py
index 101a44a96..ea3bbf431 100644
--- a/modules/createEVENT/MPM/MPM.py
+++ b/modules/createEVENT/MPM/MPM.py
@@ -1,35 +1,35 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(
- timeSeriesArray,
- patternsArray,
+def addFloorForceToEvent( # noqa: N802
+ timeSeriesArray, # noqa: N803
+ patternsArray, # noqa: N803
force,
direction,
floor,
- dT,
+ dT, # noqa: N803
):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'HydroForceSeries_' + str(floor) + direction
- timeSeries = {'name': seriesName, 'dT': dT, 'type': 'Value', 'data': force}
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'HydroForceSeries_' + str(floor) + direction # noqa: N806
+ timeSeries = {'name': seriesName, 'dT': dT, 'type': 'Value', 'data': force} # noqa: N806
timeSeriesArray.append(timeSeries)
- patternName = 'HydroForcePattern_' + str(floor) + direction
+ patternName = 'HydroForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -41,10 +41,10 @@ def addFloorForceToEvent(
patternsArray.append(pattern)
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'HydroForceSeries_' + str(floor) + direction
- patternName = 'HydroForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803, F811
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'HydroForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'HydroForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -56,19 +56,19 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def addFloorPressure(pressureArray, floor):
- """Add floor pressure in the event file"""
- floorPressure = {'story': str(floor), 'pressure': [0.0, 0.0]}
+def addFloorPressure(pressureArray, floor): # noqa: N802, N803
+ """Add floor pressure in the event file""" # noqa: D400
+ floorPressure = {'story': str(floor), 'pressure': [0.0, 0.0]} # noqa: N806
pressureArray.append(floorPressure)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- timeSeriesArray = []
- patternsArray = []
- pressureArray = []
- hydroEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ timeSeriesArray = [] # noqa: N806, F841
+ patternsArray = [] # noqa: N806
+ pressureArray = [] # noqa: N806
+ hydroEventJson = { # noqa: N806
'type': 'Hydro', # Using HydroUQ
'subtype': 'MPM', # Using ClaymoreUW Material Point Method
# "timeSeries": [], # From GeoClawOpenFOAM
@@ -80,21 +80,21 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [hydroEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [hydroEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
# addFloorPressure(pressureArray, floor) # From GeoClawOpenFOAM
- with open(eventFilePath, 'w', encoding='utf-8') as eventsFile:
+ with open(eventFilePath, 'w', encoding='utf-8') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath, encoding='utf-8') as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath, encoding='utf-8') as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -116,12 +116,12 @@ def GetFloorsCount(BIMFilePath):
# Parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
# Reads BIM file
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# Write the event file
writeEVENT(forces, arguments.filenameEVENT)
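
The F811 suppression in this file is worth a second look: MPM.py defines `addFloorForceToEvent` twice, and because a `def` statement is an ordinary assignment, the second definition silently rebinds the name at import time, leaving only the four-argument version callable. A minimal demonstration of the rebinding rule:

    def add(a, b, c):
        return a + b + c

    def add(a, b):  # rebinds the name; the three-argument version is unreachable
        return a + b

    print(add(1, 2))  # 3
    # add(1, 2, 3) now raises TypeError: add() takes 2 positional arguments but 3 were given
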
diff --git a/modules/createEVENT/MPM/foam_file_processor.py b/modules/createEVENT/MPM/foam_file_processor.py
index 946fc6916..58399274d 100644
--- a/modules/createEVENT/MPM/foam_file_processor.py
+++ b/modules/createEVENT/MPM/foam_file_processor.py
@@ -1,4 +1,4 @@
-# This script contains functions for reading and writing
+# This script contains functions for reading and writing # noqa: INP001, D100
# OpenFOAM dictionaries and files.
#
import os
@@ -6,18 +6,18 @@
import numpy as np
-def find_keyword_line(dict_lines, keyword):
+def find_keyword_line(dict_lines, keyword): # noqa: D103
start_line = -1
count = 0
for line in dict_lines:
- l = line.lstrip(' ')
+ l = line.lstrip(' ') # noqa: E741
if l.startswith(keyword):
start_line = count
break
- count += 1
+ count += 1 # noqa: SIM113
return start_line
@@ -29,11 +29,11 @@ def write_foam_field(field, file_name):
vectorField,
tensorField,
symmTensorField
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D400, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
@@ -54,11 +54,11 @@ def write_foam_field(field, file_name):
def write_scalar_field(field, file_name):
"""Writes a given one dimensional numpy array to OpenFOAM
scalar field format.
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
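
The SIM113 suppression in `find_keyword_line` marks the hand-rolled `count` variable that `enumerate` replaces. An equivalent, idiomatic sketch:

    def find_keyword_line(dict_lines, keyword):
        """Return the index of the first line starting with keyword, or -1."""
        for i, line in enumerate(dict_lines):
            if line.lstrip(' ').startswith(keyword):
                return i
        return -1
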
diff --git a/modules/createEVENT/MPM/post_process_output.py b/modules/createEVENT/MPM/post_process_output.py
index 7ec4f39ac..4021a5915 100644
--- a/modules/createEVENT/MPM/post_process_output.py
+++ b/modules/createEVENT/MPM/post_process_output.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017, The Regents of the University of California (Regents).
+# Copyright (c) 2016-2017, The Regents of the University of California (Regents). # noqa: INP001, D100
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -54,30 +54,30 @@
from scipy import signal
-def readPressureProbes(fileName):
+def readPressureProbes(fileName): # noqa: N802, N803
"""Created on Wed May 16 14:31:42 2018
Reads pressure probe data from OpenFOAM and returns the probe location, time, and the pressure
for each time step.
@author: Abiy
- """
+ """ # noqa: D400, D401
probes = []
p = []
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.split()
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
p_probe_i = np.zeros([len(probes)])
for i in range(len(probes)):
@@ -106,7 +106,7 @@ def read_pressure_data(file_names):
time, pressure
Returns the pressure time and pressure data of the connected file.
- """
+ """ # noqa: D205, D401, D404
no_files = len(file_names)
connected_time = [] # Connected array of time
connected_p = [] # connected array of pressure.
@@ -128,7 +128,7 @@ def read_pressure_data(file_names):
index = np.where(time2 > time1[-1])[0][0]
# index += 1
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
index = 0 # Join them even if they have a time gap
@@ -136,7 +136,7 @@ def read_pressure_data(file_names):
connected_p = np.concatenate((connected_p, p2[index:]))
time1 = time2
- p1 = p2
+ p1 = p2 # noqa: F841
return probes, connected_time, connected_p
@@ -144,7 +144,7 @@ class PressureData:
"""A class that holds a pressure data and performs the following operations:
- mean and rms pressure coefficients
- peak pressure coefficients
- """
+ """ # noqa: D205, D400
def __init__(
self,
@@ -172,8 +172,8 @@ def __init__(
self.probe_count = np.shape(self.probes)[0]
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
# print(sorted_index)
@@ -181,7 +181,7 @@ def __read_cfd_data(self):
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p') # noqa: PTH118
file_names.append(file_name)
# print(file_names)
@@ -190,30 +190,30 @@ def __read_cfd_data(self):
# self.p = np.transpose(self.p) # OpenFOAM gives p/rho
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
# self.cp = self.cp[:,start_index:]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, start_index:]
- except:
+ except: # noqa: S110, E722
pass
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
# self.cp = self.cp[:,:end_index]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, :end_index]
- except:
+ except: # noqa: S110, E722
pass
-def von_karman_spectrum(f, Uav, I, L, comp=0):
- psd = np.zeros(len(f))
+def von_karman_spectrum(f, Uav, I, L, comp=0): # noqa: N803, E741, D103
+ psd = np.zeros(len(f)) # noqa: F841
if comp == 0:
return (
@@ -223,7 +223,7 @@ def von_karman_spectrum(f, Uav, I, L, comp=0):
/ np.power(1.0 + 70.8 * np.power(f * L / Uav, 2.0), 5.0 / 6.0)
)
- if comp == 1 or comp == 2:
+ if comp == 1 or comp == 2: # noqa: RET503, PLR1714, PLR2004
return (
4.0
* np.power(I * Uav, 2.0)
@@ -251,7 +251,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -264,8 +264,8 @@ def write_open_foam_vector_field(p, file_name):
"""Writes a given vector-field (n x 3) array to OpenFOAM 'vectorField'
format.
- """
- f = open(file_name, 'w+')
+ """ # noqa: D205, D401
+ f = open(file_name, 'w+') # noqa: SIM115, PTH123
f.write('%d' % len(p[:, 2]))
f.write('\n(')
for i in range(len(p[:, 2])):
@@ -275,58 +275,58 @@ def write_open_foam_vector_field(p, file_name):
f.close()
-def read_openFoam_scalar_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- sField = []
+def read_openFoam_scalar_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ sField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
itrf = iter(f)
next(itrf)
for line in itrf:
- if line.startswith('(') or line.startswith(')'):
+ if line.startswith('(') or line.startswith(')'): # noqa: PIE810
continue
- else:
- line = line.split()
+ else: # noqa: RET507
+ line = line.split() # noqa: PLW2901
sField.append(float(line[0]))
- sField = np.asarray(sField, dtype=np.float32)
+ sField = np.asarray(sField, dtype=np.float32) # noqa: N806
- return sField
+ return sField # noqa: RET504
-def read_openFoam_vector_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_vector_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
- if len(line) < 3:
+ if len(line) < 3: # noqa: PLR2004
continue
vField.append([float(line[0]), float(line[1]), float(line[2])])
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 9
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -338,23 +338,23 @@ def read_openFoam_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_symmetric_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 6
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -365,9 +365,9 @@ def read_openFoam_symmetric_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
def read_velocity_data(path):
@@ -385,35 +385,35 @@ def read_velocity_data(path):
time, velocity
Returns the velocity time and velocity data of the connected file.
- """
+ """ # noqa: D205, D401, D404
num_files = len(path)
connected_time = [] # Connected array of time
- connected_U = [] # connected array of pressure.
+ connected_U = [] # connected array of velocity. # noqa: N806
time1 = []
- U1 = []
+ U1 = [] # noqa: N806
time2 = []
- U2 = []
+ U2 = [] # noqa: N806
probes = []
for i in range(num_files):
- probes, time2, U2 = read_velocity_probes(path[i])
+ probes, time2, U2 = read_velocity_probes(path[i]) # noqa: N806
if i != 0:
try:
index = np.where(time2 > time1[-1])[0][0]
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
index = 0 # Join them even if they have a time gap
connected_time = np.concatenate((connected_time, time2[index:]))
- connected_U = np.concatenate((connected_U, U2[index:]))
+ connected_U = np.concatenate((connected_U, U2[index:])) # noqa: N806
else:
connected_time = time2
- connected_U = U2
+ connected_U = U2 # noqa: N806
time1 = time2
- U1 = U2
+ U1 = U2 # noqa: N806, F841
shape = np.shape(connected_U)
- U = np.zeros((shape[1], shape[2], shape[0]))
+ U = np.zeros((shape[1], shape[2], shape[0])) # noqa: N806
for i in range(shape[1]):
for j in range(shape[2]):
@@ -421,33 +421,33 @@ def read_velocity_data(path):
return probes, connected_time, U
-def read_velocity_probes(fileName):
+def read_velocity_probes(fileName): # noqa: N803
"""Created on Wed May 16 14:31:42 2018
Reads velocity probe data from OpenFOAM and returns the probe location, time,
and the velocity vector for each time step.
- """
+ """ # noqa: D400, D401
probes = []
- U = []
+ U = [] # noqa: N806
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
try:
time.append(float(line[0]))
- except:
+ except: # noqa: S112, E722
continue
u_probe_i = np.zeros([len(probes), 3])
for i in range(len(probes)):
@@ -460,13 +460,13 @@ def read_velocity_probes(fileName):
probes = np.asarray(probes, dtype=np.float32)
time = np.asarray(time, dtype=np.float32)
- U = np.asarray(U, dtype=np.float32)
+ U = np.asarray(U, dtype=np.float32) # noqa: N806
return probes, time, U
def calculate_length_scale(u, uav, dt, min_corr=0.0):
- """Calculates the length scale of a velocity time history given."""
+ """Calculates the length scale of a velocity time history given.""" # noqa: D401
u = u - np.mean(u)
corr = signal.correlate(u, u, mode='full')
@@ -479,12 +479,12 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):
corr = corr[:loc]
- L = uav * np.trapz(corr, dx=dt)
+ L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806
- return L
+ return L # noqa: RET504
-def psd(x, dt, nseg):
+def psd(x, dt, nseg): # noqa: F811
"""Calculates the power spectral density of a given signal using the welch
method.
@@ -502,7 +502,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -516,13 +516,13 @@ class VelocityData:
- mean velocity profile
- turbulence intensity profiles
- integral scale of turbulence profiles
- """
+ """ # noqa: D205, D400
def __init__(
self,
path,
sampling_rate=400,
- filter_data=False,
+ filter_data=False, # noqa: FBT002
filter_freq=400,
start_time=None,
end_time=None,
@@ -551,14 +551,14 @@ def __init__(
self.__calculate_all()
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U') # noqa: PTH118
file_names.append(file_name)
self.probes, self.time, self.U = read_velocity_data(file_names)
@@ -576,14 +576,14 @@ def __read_cfd_data(self):
# Coefficient of variation
cv = np.std(np.diff(self.time)) / np.mean(np.diff(self.time))
- if cv > 1.0e-4:
+ if cv > 1.0e-4: # noqa: PLR2004
self.__adjust_time_step()
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __adjust_time_step(self):
- if self.resample_dt == None:
+ if self.resample_dt == None: # noqa: E711
dt = np.mean(np.diff(self.time))
else:
dt = self.resample_dt
@@ -592,7 +592,7 @@ def __adjust_time_step(self):
shape = np.shape(self.U)
- U = np.zeros((shape[0], shape[1], len(time)))
+ U = np.zeros((shape[0], shape[1], len(time))) # noqa: N806
for i in range(shape[0]):
for j in range(shape[1]):
@@ -611,12 +611,12 @@ def __filter_signal(self):
self.U[i, j, :] = signal.sosfilt(low_pass, self.U[i, j, :])
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
self.U = self.U[:, :, start_index:]
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
self.U = self.U[:, :, :end_index]
@@ -654,7 +654,7 @@ def __calculate_all(self):
self.uv_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 1, :])[0, 1]
self.uw_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 2, :])[0, 1]
- def get_Uav(self, z):
+ def get_Uav(self, z): # noqa: N802, D102
from scipy import interpolate
f = interpolate.interp1d(self.z, self.Uav)
@@ -670,48 +670,48 @@ def copy_vtk_planes_and_order(input_path, output_path, field):
input_path: path of the vtk files in the postProcessing directory
output_path: path to write the vtk files in order
- """
- if not os.path.isdir(input_path):
- print(f'Cannot find the path for: {input_path}')
+ """ # noqa: D205, D401, D404
+ if not os.path.isdir(input_path): # noqa: PTH112
+ print(f'Cannot find the path for: {input_path}') # noqa: T201
return
- if not os.path.isdir(output_path):
- print(f'Cannot find the path for: {output_path}')
+ if not os.path.isdir(output_path): # noqa: PTH112
+ print(f'Cannot find the path for: {output_path}') # noqa: T201
return
- print(f'Reading from path: {input_path}')
+ print(f'Reading from path: {input_path}') # noqa: T201
time_names = os.listdir(input_path)
times = np.float64(time_names)
sorted_index = np.argsort(times).tolist()
n_times = len(times)
- print(f'\tNumber of time directories: {n_times} ')
- print(f'\tTime step: {np.mean(np.diff(times)):.4f} s')
- print(
+ print(f'\tNumber of time directories: {n_times} ') # noqa: T201
+ print(f'\tTime step: {np.mean(np.diff(times)):.4f} s') # noqa: T201
+ print( # noqa: T201
f'\tTotal duration: {times[sorted_index[-1]] - times[sorted_index[0]]:.4f} s'
)
for i in range(n_times):
index = sorted_index[i]
- pathi = os.path.join(input_path, time_names[index])
+ pathi = os.path.join(input_path, time_names[index]) # noqa: PTH118
os.listdir(pathi)
new_name = f'{field}_T{i + 1:04d}.vtk'
for f in os.listdir(pathi):
if f.endswith('.vtk'):
- new_path = os.path.join(output_path, new_name)
- old_path = os.path.join(pathi, f)
+ new_path = os.path.join(output_path, new_name) # noqa: PTH118
+ old_path = os.path.join(pathi, f) # noqa: PTH118
shutil.copyfile(old_path, new_path)
- print(f'Copied path: {old_path}')
+ print(f'Copied path: {old_path}') # noqa: T201
-def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
+def plot_wind_profiles_and_spectra(case_path, output_path, prof_name): # noqa: D103
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -719,7 +719,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
ref_h = wc_data['referenceHeight']
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = VelocityData(prof_path, start_time=None, end_time=None)
@@ -736,26 +736,26 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
prof_np[:, 8] = prof.L[:, 2]
# Read the target wind profile data
- tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet')
+ tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet') # noqa: PTH118
- tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points'))
- tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U'))
- tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R'))
- tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L'))
+ tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points')) # noqa: PTH118
+ tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U')) # noqa: PTH118, N806
+ tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R')) # noqa: PTH118, N806
+ tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L')) # noqa: PTH118, N806
- tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U)
+ tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U) # noqa: N806, F841
- tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U
- tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U
- tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U
+ tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U # noqa: N806
+ tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U # noqa: N806
+ tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U # noqa: N806
tar_uw = tar_R[:, 2]
- tar_Lu = tar_L[:, 0]
- tar_Lv = tar_L[:, 3]
- tar_Lw = tar_L[:, 6]
+ tar_Lu = tar_L[:, 0] # noqa: N806
+ tar_Lv = tar_L[:, 3] # noqa: N806
+ tar_Lw = tar_L[:, 6] # noqa: N806
- tar_I = np.zeros((3, len(tar_Iu)))
- tar_L = np.zeros((3, len(tar_Lu)))
+ tar_I = np.zeros((3, len(tar_Iu))) # noqa: N806
+ tar_L = np.zeros((3, len(tar_Lu))) # noqa: N806
tar_I[0, :] = tar_Iu
tar_I[1, :] = tar_Iv
@@ -788,7 +788,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_U,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -799,7 +799,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 1],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -833,7 +833,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -844,7 +844,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 2],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -877,7 +877,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -888,7 +888,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 3],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -921,7 +921,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -932,7 +932,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 4],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -965,7 +965,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_uw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -976,7 +976,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 5],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1009,7 +1009,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1020,7 +1020,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 6],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1053,7 +1053,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lv,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1064,7 +1064,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 7],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1097,7 +1097,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1108,7 +1108,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 8],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1139,7 +1139,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=850, width=1200, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, prof_name + '.html'),
+ os.path.join(output_path, prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1170,8 +1170,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
vertical_spacing=0.15,
)
- U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1])
- U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U)
+ U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1]) # noqa: N806
+ U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U) # noqa: N806
# Plot each component
for j in range(ncomp):
@@ -1185,8 +1185,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
spec = freq * spec / u_var
freq = freq * spec_h[i] / U_ref_prof
- tar_Iz = tar_I[j, loc_tar]
- tar_Lz = tar_L[j, loc_tar]
+ tar_Iz = tar_I[j, loc_tar] # noqa: N806
+ tar_Lz = tar_L[j, loc_tar] # noqa: N806
vonk_f = np.logspace(np.log10(f_min), np.log10(f_max), 200)
vonk_psd = von_karman_spectrum(vonk_f, U_ref_tar, tar_Iz, tar_Lz, j)
@@ -1198,7 +1198,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=freq,
y=spec,
- line=dict(color='firebrick', width=1.5),
+ line=dict(color='firebrick', width=1.5), # noqa: C408
mode='lines',
name=prof_name,
),
@@ -1209,7 +1209,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=vonk_f,
y=vonk_psd,
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target(von Karman)',
),
@@ -1240,15 +1240,15 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=450, width=1500, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(
+ os.path.join( # noqa: PTH118
output_path, 'spectra_' + prof_name + '_H' + str(1 + i) + '.html'
),
include_mathjax='cdn',
)
-def plot_pressure_profile(case_path, output_path, prof_name):
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+def plot_pressure_profile(case_path, output_path, prof_name): # noqa: D103
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = PressureData(
prof_path, start_time=1.0, end_time=None, u_ref=0.0, rho=1.25, p_ref=0.0
@@ -1271,7 +1271,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
go.Scatter(
x=prof.x - np.min(prof.x),
y=std_p,
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1303,7 +1303,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
fig.update_layout(height=400, width=800, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, 'pressure_' + prof_name + '.html'),
+ os.path.join(output_path, 'pressure_' + prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1325,15 +1325,15 @@ def plot_pressure_profile(case_path, output_path, prof_name):
case_path = arguments.case
- print('Case full path: ', case_path)
+ print('Case full path: ', case_path) # noqa: T201
# prof_name = sys.argv[2]
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1342,12 +1342,12 @@ def plot_pressure_profile(case_path, output_path, prof_name):
wind_profiles = rm_data['windProfiles']
vtk_planes = rm_data['vtkPlanes']
- prof_output_path = os.path.join(
+ prof_output_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'output', 'windProfiles'
)
# Check if it exists and remove files
- if os.path.exists(prof_output_path):
+ if os.path.exists(prof_output_path): # noqa: PTH110
shutil.rmtree(prof_output_path)
# Create new path
@@ -1357,8 +1357,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
for prof in wind_profiles:
name = prof['name']
field = prof['field']
- print(name)
- print(field)
+ print(name) # noqa: T201
+ print(field) # noqa: T201
if field == 'Velocity':
plot_wind_profiles_and_spectra(case_path, prof_output_path, name)
@@ -1371,8 +1371,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
name = pln['name']
field = pln['field']
- vtk_path = os.path.join(case_path, 'postProcessing', name)
- vtk_path_renamed = os.path.join(
+ vtk_path = os.path.join(case_path, 'postProcessing', name) # noqa: PTH118
+ vtk_path_renamed = os.path.join( # noqa: PTH118
case_path, 'postProcessing', name + '_renamed'
)
@@ -1381,5 +1381,5 @@ def plot_pressure_profile(case_path, output_path, prof_name):
copy_vtk_planes_and_order(vtk_path, vtk_path_renamed, field)
# Check if it exists and remove files
- if os.path.exists(vtk_path):
+ if os.path.exists(vtk_path): # noqa: PTH110
shutil.rmtree(vtk_path)
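
The `psd` helper defined twice in this file (the second copy carries the F811 suppression) is a thin wrapper over `scipy.signal.welch` applied to the demeaned signal. A standalone sketch; the integer segment length and the plain `return freq, spectra` are choices made here, since the hunks do not show the original's return statement:

    import numpy as np
    from scipy import signal

    def psd(x, dt, nseg):
        """Power spectral density of x via Welch's method, using nseg segments."""
        x = np.asarray(x, dtype=float)
        x_no_mean = x - np.mean(x)  # remove the mean before estimating the spectrum
        nperseg = len(x_no_mean) // nseg  # welch expects an integer segment length
        freq, spectra = signal.welch(x_no_mean, fs=1.0 / dt, nperseg=nperseg)
        return freq, spectra
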
diff --git a/modules/createEVENT/MPM/post_process_sensors.py b/modules/createEVENT/MPM/post_process_sensors.py
index 0789838b4..ccba2f779 100644
--- a/modules/createEVENT/MPM/post_process_sensors.py
+++ b/modules/createEVENT/MPM/post_process_sensors.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017, The Regents of the University of California (Regents).
+# Copyright (c) 2016-2017, The Regents of the University of California (Regents). # noqa: INP001
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -58,14 +58,14 @@
if __name__ == '__main__':
# CLI parser
input_args = sys.argv[1:]
- print(
+ print( # noqa: T201
'post_process_sensors.py - Backend-script post_process_sensors.py reached main. Starting...'
)
- print(
+ print( # noqa: T201
'post_process_sensors.py - Backend-script post_process_sensors.py running: '
+ str(sys.argv[0])
)
- print(
+ print( # noqa: T201
'post_process_sensors.py - Backend-script post_process_sensors.py received input args: '
+ str(input_args)
)
@@ -88,9 +88,9 @@
# sensor_data_dir = arguments.input_directory
# output_dir = arguments.output_directory
# sensor_files = (arguments.files).split(',')
- print('Sensor data directory: ', sensor_data_dir)
- print('Output directory: ', output_dir)
- print('Sensor files: ', sensor_files)
+ print('Sensor data directory: ', sensor_data_dir) # noqa: T201
+ print('Output directory: ', output_dir) # noqa: T201
+ print('Sensor files: ', sensor_files) # noqa: T201
# json_path = os.path.join(case_path, "constant", "simCenter", "input", "MPM.json")
# with open(json_path) as json_file:
# json_data = json.load(json_file)
@@ -106,25 +106,25 @@
sensor_data = {}
for sensor_file in sensor_files:
# Remove any leading '/' from the sensor file
- sensor_file = sensor_file.lstrip('/')
+ sensor_file = sensor_file.lstrip('/') # noqa: PLW2901
# Remove whitespace from the sensor file
- sensor_file = sensor_file.strip()
- sensor_file = sensor_file.split(
+ sensor_file = sensor_file.strip() # noqa: PLW2901
+ sensor_file = sensor_file.split( # noqa: PLW2901
'.'
) # Split the sensor file by the '.' character
if sensor_file[-1] != 'csv':
- print(
+ print( # noqa: T201
'Error: Sensor file is not a csv file. Please provide a csv file. Will skip this file: '
+ sensor_file[0]
+ '.'
+ sensor_file[-1]
)
continue
- sensor_file = sensor_file[
+ sensor_file = sensor_file[ # noqa: PLW2901
0
] # Get the first part of the sensor file, which is the sensor name
sensor_data[sensor_file] = pd.read_csv(
- os.path.join(sensor_data_dir, sensor_file + '.csv'),
+ os.path.join(sensor_data_dir, sensor_file + '.csv'), # noqa: PTH118
header=None,
skiprows=1,
delimiter=',',
@@ -136,7 +136,7 @@
please_convert_to_date_time = False # May want to use this later, as wave-flumes tend to report time in date-time formats
if (
- please_convert_to_date_time == True
+ please_convert_to_date_time == True # noqa: E712
and sensor_data[sensor_file]['time'].dtype != 'datetime64[ns]'
):
sensor_data[sensor_file]['time'] = pd.to_datetime(
@@ -144,24 +144,24 @@
)
# Make sure the output directory exists, and save the sensor raw data to the output directory if they aren't already there
- if not os.path.exists(output_dir):
- print(
+ if not os.path.exists(output_dir): # noqa: PTH110
+ print( # noqa: T201
'Output directory not found... Creating output directory: '
+ output_dir
+ '...'
)
- os.makedirs(output_dir)
+ os.makedirs(output_dir) # noqa: PTH103
if output_dir != sensor_data_dir:
for sensor_name in sensor_names:
- print('Save ' + os.path.join(output_dir, sensor_name) + '.csv' + '...')
+ print('Save ' + os.path.join(output_dir, sensor_name) + '.csv' + '...') # noqa: T201, PTH118
sensor_data[sensor_name].to_csv(
- os.path.join(output_dir, sensor_name + '.csv'),
+ os.path.join(output_dir, sensor_name + '.csv'), # noqa: PTH118
index=False,
)
# Plot the sensor data, and save the plots to the output directory (html and png files)
for sensor_name in sensor_names:
- print('Plotting ' + sensor_name + '...')
+ print('Plotting ' + sensor_name + '...') # noqa: T201
fig, axes = plt.subplots(1, 1)
sensor_name_png = sensor_name + '.png'
sensor_name_html = sensor_name + '.webp'
@@ -172,21 +172,21 @@
)
axes.set_xlabel('Time [s]')
axes.set_ylabel('Sensor Measurement')
- print('Save ' + os.path.join(output_dir, sensor_name_png) + '...')
+ print('Save ' + os.path.join(output_dir, sensor_name_png) + '...') # noqa: T201, PTH118
plt.savefig(
- os.path.join(output_dir, sensor_name_png),
+ os.path.join(output_dir, sensor_name_png), # noqa: PTH118
dpi=300,
bbox_inches='tight',
) # save the plot as a png file
- print('Save ' + os.path.join(output_dir, sensor_name_html) + '...')
+ print('Save ' + os.path.join(output_dir, sensor_name_html) + '...') # noqa: T201, PTH118
plt.savefig(
- os.path.join(output_dir, sensor_name_html),
+ os.path.join(output_dir, sensor_name_html), # noqa: PTH118
dpi=300,
bbox_inches='tight',
) # save the plot as an html file
plt.show()
plt.close()
- print(
+ print( # noqa: T201
'post_process_sensors.py - Backend-script post_process_sensors.py reached end of main. Finished.'
)
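The PLW2901 suppressions above fire because the loop variable `sensor_file` is overwritten at every cleanup step. A hedged sketch of how the loop could avoid both PLW2901 and the PTH118 joins, assuming equivalent inputs:

```python
# Hypothetical refactor, not part of this diff: bind each derived value to
# a fresh name (PLW2901) and use pathlib instead of os.path.join (PTH118).
from pathlib import Path

sensor_files = ['velocity1.csv', '/pressure2.csv', 'notes.txt']  # sample input
sensor_data_dir = Path('sensor_data')                            # assumed directory

for raw_entry in sensor_files:
    entry = Path(raw_entry.strip().lstrip('/'))
    if entry.suffix != '.csv':
        continue  # skip anything that is not a csv file
    sensor_name = entry.stem                    # file name without the extension
    csv_path = sensor_data_dir / f'{sensor_name}.csv'
```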
diff --git a/modules/createEVENT/MPM/setup_case.py b/modules/createEVENT/MPM/setup_case.py
index 4dbfca60e..d0ec247b7 100644
--- a/modules/createEVENT/MPM/setup_case.py
+++ b/modules/createEVENT/MPM/setup_case.py
@@ -1,7 +1,7 @@
"""This script writes BC and initial condition, and setups the OpenFoam case
directory.
-"""
+""" # noqa: INP001, D205, D404
import json
import os
@@ -11,9 +11,9 @@
import numpy as np
-def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_block_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -22,12 +22,12 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
origin = np.array(geom_data['origin'])
- scale = geom_data['geometricScale']
+ scale = geom_data['geometricScale'] # noqa: F841
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_cells = mesh_data['xNumCells']
y_cells = mesh_data['yNumCells']
@@ -69,7 +69,7 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
z_max = z_min + Lz
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/blockMeshDictTemplate')
+ dict_file = open(template_dict_path + '/blockMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -114,18 +114,18 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
write_file_name = case_path + '/system/blockMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
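The SIM115/PTH123 pairs flagged throughout this file come from bare `open()` calls whose handles are closed manually further down. A minimal sketch of the pattern both rules point toward, with an assumed template location:

```python
# Hypothetical sketch, not part of this diff: a context manager closes the
# handle automatically (SIM115) and pathlib replaces string paths (PTH123).
from pathlib import Path

template_dir = Path('templateDir')                  # assumed location
template_dir.mkdir(exist_ok=True)
template = template_dir / 'blockMeshDictTemplate'
template.write_text('// blockMeshDict template\n')  # stand-in content

with template.open() as dict_file:
    dict_lines = dict_file.readlines()
```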
@@ -133,16 +133,16 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
geom_data = json_data['GeometricData']
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
origin = np.array(geom_data['origin'])
num_cells_between_levels = mesh_data['numCellsBetweenLevels']
resolve_feature_angle = mesh_data['resolveFeatureAngle']
- num_processors = mesh_data['numProcessors']
+ num_processors = mesh_data['numProcessors'] # noqa: F841
refinement_boxes = mesh_data['refinementBoxes']
@@ -150,14 +150,14 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
y_min = -Ly / 2.0 - origin[1]
z_min = 0.0 - origin[2]
- x_max = x_min + Lx
+ x_max = x_min + Lx # noqa: F841
y_max = y_min + Ly
z_max = z_min + Lz
inside_point = [x_min + Lf / 2.0, (y_min + y_max) / 2.0, (z_min + z_max) / 2.0]
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate')
+ dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -228,10 +228,10 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/snappyHexMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
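The recurring exists/remove/open/write/close block that PTH110, PTH107, and SIM115 mark collapses to a few pathlib calls. A hedged sketch, assuming the same case layout:

```python
# Hypothetical sketch, not part of this diff: unlink(missing_ok=True)
# replaces the exists()/remove() pair, and write_text() replaces the
# manual write loop plus close().
from pathlib import Path

write_file = Path('system') / 'snappyHexMeshDict'    # assumed case layout
write_file.parent.mkdir(parents=True, exist_ok=True)
write_file.unlink(missing_ok=True)                   # no existence check needed
write_file.write_text(''.join(['FoamFile\n', '{\n', '}\n']))
```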
@@ -240,9 +240,9 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
def write_boundary_data_files(input_json_path, case_path):
"""This functions writes wind profile files in "constant/boundaryData/inlet"
if TInf options are used for the simulation.
- """
+ """ # noqa: D205, D401, D404
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -259,8 +259,8 @@ def write_boundary_data_files(input_json_path, case_path):
origin = np.array(geom_data['origin'])
- Ly = geom_data['domainWidth']
- Lf = geom_data['fetchLength']
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_min = -Lf - origin[0]
y_min = -Ly / 2.0 - origin[1]
@@ -286,25 +286,25 @@ def write_boundary_data_files(input_json_path, case_path):
foam.write_foam_field(wind_profiles[:, 8:17], bd_path + 'L')
-def write_U_file(input_json_path, template_dict_path, case_path):
+def write_U_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- inlet_BC_type = boundary_data['inletBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
+ inlet_BC_type = boundary_data['inletBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/UFileTemplate')
+ dict_file = open(template_dict_path + '/UFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -400,28 +400,28 @@ def write_U_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/U'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_p_file(input_json_path, template_dict_path, case_path):
+def write_p_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/pFileTemplate')
+ dict_file = open(template_dict_path + '/pFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -485,34 +485,34 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/p'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_nut_file(input_json_path, template_dict_path, case_path):
+def write_nut_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
# wind_speed = wind_data['roofHeightWindSpeed']
# building_height = wind_data['buildingHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/nutFileTemplate')
+ dict_file = open(template_dict_path + '/nutFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -590,34 +590,34 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/nut'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_epsilon_file(input_json_path, template_dict_path, case_path):
+def write_epsilon_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/epsilonFileTemplate')
+ dict_file = open(template_dict_path + '/epsilonFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -708,34 +708,34 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/epsilon'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_k_file(input_json_path, template_dict_path, case_path):
+def write_k_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/kFileTemplate')
+ dict_file = open(template_dict_path + '/kFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -743,7 +743,7 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# BC and initial condition (you may need to scale to model scale)
# k0 = 1.3 #not in model scale
- I = 0.1
+ I = 0.1 # noqa: N806, E741
k0 = 1.5 * (I * wind_speed) ** 2
# Internal Field #########################
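The `I = 0.1` assignment above needs both N806 and E741 because `I` is uppercase and visually ambiguous with `l` and `1`. A descriptive name would retire both suppressions; a sketch:

```python
# Hypothetical rename, not part of this diff.
wind_speed = 10.0            # sample value
turbulence_intensity = 0.1   # was `I`
k0 = 1.5 * (turbulence_intensity * wind_speed) ** 2
```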
@@ -822,18 +822,18 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/k'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_controlDict_file(input_json_path, template_dict_path, case_path):
+def write_controlDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -858,7 +858,7 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
purge_write = 3
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/controlDictTemplate')
+ dict_file = open(template_dict_path + '/controlDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -931,18 +931,18 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/controlDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSolution_file(input_json_path, template_dict_path, case_path):
+def write_fvSolution_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -955,7 +955,7 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
num_outer_correctors = ns_data['numOuterCorrectors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/fvSolutionTemplate')
+ dict_file = open(template_dict_path + '/fvSolutionTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -990,18 +990,18 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSolution'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
+def write_pressure_probes_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1011,7 +1011,7 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1036,18 +1036,18 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/pressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
+def write_wind_profiles_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1061,7 +1061,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
write_interval = rm_data['profileWriteInterval']
start_time = rm_data['profileStartTime']
- if rm_data['monitorWindProfile'] == False:
+ if rm_data['monitorWindProfile'] == False: # noqa: E712
return
if len(wind_profiles) == 0:
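E712 above flags `== False`: explicit comparison against a boolean literal is noise in Python. The idiomatic early return, sketched with a sample input:

```python
# Hypothetical sketch, not part of this diff.
def write_wind_profiles(rm_data):
    if not rm_data['monitorWindProfile']:  # instead of `== False`
        return
    ...  # proceed to write the profile dicts

write_wind_profiles({'monitorWindProfile': False})  # sample call, returns early
```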
@@ -1070,7 +1070,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for prof in wind_profiles:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1137,18 +1137,18 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
+def write_vtk_plane_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1160,7 +1160,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
vtk_planes = rm_data['vtkPlanes']
write_interval = rm_data['vtkWriteInterval']
- if rm_data['monitorVTKPlane'] == False:
+ if rm_data['monitorVTKPlane'] == False: # noqa: E712
return
if len(vtk_planes) == 0:
@@ -1169,7 +1169,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for pln in vtk_planes:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/vtkPlaneTemplate')
+ dict_file = open(template_dict_path + '/vtkPlaneTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1238,30 +1238,30 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_momentumTransport_file(input_json_path, template_dict_path, case_path):
+def write_momentumTransport_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
turb_data = json_data['turbulenceModeling']
simulation_type = turb_data['simulationType']
- RANS_type = turb_data['RANSModelType']
- LES_type = turb_data['LESModelType']
- DES_type = turb_data['DESModelType']
+ RANS_type = turb_data['RANSModelType'] # noqa: N806
+ LES_type = turb_data['LESModelType'] # noqa: N806
+ DES_type = turb_data['DESModelType'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/momentumTransportTemplate')
+ dict_file = open(template_dict_path + '/momentumTransportTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1293,18 +1293,18 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
# Write edited dict to file
write_file_name = case_path + '/constant/momentumTransport'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_physicalProperties_file(input_json_path, template_dict_path, case_path):
+def write_physicalProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1313,7 +1313,7 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/physicalPropertiesTemplate')
+ dict_file = open(template_dict_path + '/physicalPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1325,18 +1325,18 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
# Write edited dict to file
write_file_name = case_path + '/constant/physicalProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_transportProperties_file(input_json_path, template_dict_path, case_path):
+def write_transportProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1345,7 +1345,7 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/transportPropertiesTemplate')
+ dict_file = open(template_dict_path + '/transportPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1357,18 +1357,18 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/constant/transportProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
+def write_fvSchemes_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1377,7 +1377,7 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
simulation_type = turb_data['simulationType']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}')
+ dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1385,18 +1385,18 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSchemes'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
+def write_decomposeParDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1405,7 +1405,7 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
num_processors = ns_data['numProcessors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/decomposeParDictTemplate')
+ dict_file = open(template_dict_path + '/decomposeParDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1425,18 +1425,18 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/decomposeParDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
+def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
fmax = 200.0
@@ -1452,7 +1452,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
duration = duration * 1.010
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/DFSRTurbDictTemplate')
+ dict_file = open(template_dict_path + '/DFSRTurbDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1480,10 +1480,10 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/constant/DFSRTurbDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -1508,7 +1508,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# set up goes here
# Read JSON data
- with open(input_json_path + '/EmptyDomainCFD.json') as json_file:
+ with open(input_json_path + '/EmptyDomainCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
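Most of the N806 suppressions in this file mark local names such as `Lx`, `inlet_BC_type`, or `RANS_type` that mirror the JSON keys. Renaming them to snake_case would remove the suppressions entirely, at the cost of touching every use site; a sketch of the trade-off:

```python
# Hypothetical rename, not part of this diff.
geom_data = {'domainLength': 100.0, 'domainWidth': 50.0}  # sample input

domain_length = geom_data['domainLength']  # was Lx (N806)
domain_width = geom_data['domainWidth']    # was Ly (N806)
```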
diff --git a/modules/createEVENT/NNGM/NNGM.py b/modules/createEVENT/NNGM/NNGM.py
index 030dda215..6d6c9ece6 100644
--- a/modules/createEVENT/NNGM/NNGM.py
+++ b/modules/createEVENT/NNGM/NNGM.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import os
from textwrap import wrap
@@ -6,66 +6,66 @@
from scipy import spatial
-def ReadSMC(smcFilePath):
- with open(smcFilePath, 'r+') as smcFile:
+def ReadSMC(smcFilePath): # noqa: N802, N803, D103
+ with open(smcFilePath, 'r+') as smcFile: # noqa: PTH123, N806
series = []
- smcLines = smcFile.readlines()
- dT = 1.0 / float(smcLines[17].strip().split()[1])
- nCommentLines = int(smcLines[12].strip().split()[7])
+ smcLines = smcFile.readlines() # noqa: N806
+ dT = 1.0 / float(smcLines[17].strip().split()[1]) # noqa: N806
+ nCommentLines = int(smcLines[12].strip().split()[7]) # noqa: N806
for line in smcLines[(27 + nCommentLines) :]:
for value in wrap(line, 10, drop_whitespace=False):
if value.strip():
- series.append(float(value) / 100.0)
+ series.append(float(value) / 100.0) # noqa: PERF401
return [series, dT]
-def ReadCOSMOS(cosmosFilePath):
- with open(cosmosFilePath, 'r+') as cosmosFile:
+def ReadCOSMOS(cosmosFilePath): # noqa: N802, N803, D103
+ with open(cosmosFilePath, 'r+') as cosmosFile: # noqa: PTH123, N806
series = []
- cosmosLines = cosmosFile.readlines()
- headerSize = int(cosmosLines[0][46:48])
- intSize = int(cosmosLines[headerSize][37:40])
- realSize = int(cosmosLines[headerSize + intSize + 1][34:37])
- commentSize = int(cosmosLines[headerSize + intSize + realSize + 2][0:4])
- totalHeader = headerSize + intSize + realSize + commentSize + 3
- recordSize = int(cosmosLines[totalHeader].strip().split()[0])
- dT = float(cosmosLines[37].strip().split()[1]) / 1000.0
+ cosmosLines = cosmosFile.readlines() # noqa: N806
+ headerSize = int(cosmosLines[0][46:48]) # noqa: N806
+ intSize = int(cosmosLines[headerSize][37:40]) # noqa: N806
+ realSize = int(cosmosLines[headerSize + intSize + 1][34:37]) # noqa: N806
+ commentSize = int(cosmosLines[headerSize + intSize + realSize + 2][0:4]) # noqa: N806
+ totalHeader = headerSize + intSize + realSize + commentSize + 3 # noqa: N806
+ recordSize = int(cosmosLines[totalHeader].strip().split()[0]) # noqa: N806
+ dT = float(cosmosLines[37].strip().split()[1]) / 1000.0 # noqa: N806
for line in cosmosLines[totalHeader + 1 : totalHeader + recordSize + 1]:
- series.append(float(line.strip()) / 100.0)
+ series.append(float(line.strip()) / 100.0) # noqa: PERF401
return [series, dT]
-def createEvent(recordsFolder, h1File, h2File, eventFilePath):
+def createEvent(recordsFolder, h1File, h2File, eventFilePath): # noqa: N802, N803, D103
if h1File.endswith('.smc'):
- h1, dt1 = ReadSMC(os.path.join(recordsFolder, h1File))
+ h1, dt1 = ReadSMC(os.path.join(recordsFolder, h1File)) # noqa: PTH118
else:
- h1, dt1 = ReadCOSMOS(os.path.join(recordsFolder, h1File))
+ h1, dt1 = ReadCOSMOS(os.path.join(recordsFolder, h1File)) # noqa: PTH118
if h2File.endswith('.smc'):
- h2, dt2 = ReadSMC(os.path.join(recordsFolder, h2File))
+ h2, dt2 = ReadSMC(os.path.join(recordsFolder, h2File)) # noqa: PTH118
else:
- h2, dt2 = ReadCOSMOS(os.path.join(recordsFolder, h2File))
+ h2, dt2 = ReadCOSMOS(os.path.join(recordsFolder, h2File)) # noqa: PTH118
- patternH1 = {}
+ patternH1 = {} # noqa: N806
patternH1['type'] = 'UniformAcceleration'
patternH1['timeSeries'] = 'accel_X'
patternH1['dof'] = 1
- patternH2 = {}
+ patternH2 = {} # noqa: N806
patternH2['type'] = 'UniformAcceleration'
patternH2['timeSeries'] = 'accel_Y'
patternH2['dof'] = 2
- seriesH1 = {}
+ seriesH1 = {} # noqa: N806
seriesH1['name'] = 'accel_X'
seriesH1['type'] = 'Value'
seriesH1['dT'] = dt1
seriesH1['data'] = h1
- seriesH2 = {}
+ seriesH2 = {} # noqa: N806
seriesH2['name'] = 'accel_Y'
seriesH2['type'] = 'Value'
seriesH2['dT'] = dt2
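The PERF401 markers in `ReadSMC`/`ReadCOSMOS` above point at `series.append(...)` inside loops; a comprehension builds the list in one pass. A sketch under the same fixed-width record format:

```python
# Hypothetical sketch, not part of this diff: build the series with a
# comprehension instead of append() in nested loops (PERF401).
from textwrap import wrap

smc_lines = ['     123.4     -56.7\n']  # sample fixed-width record line

series = [
    float(value) / 100.0
    for line in smc_lines
    for value in wrap(line, 10, drop_whitespace=False)
    if value.strip()
]
```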
@@ -80,17 +80,17 @@ def createEvent(recordsFolder, h1File, h2File, eventFilePath):
event['timeSeries'] = [seriesH1, seriesH2]
event['pattern'] = [patternH1, patternH2]
- eventsDict = {}
+ eventsDict = {} # noqa: N806
eventsDict['Events'] = [event]
eventsDict['RandomVariables'] = []
- with open(eventFilePath, 'w') as eventFile:
+ with open(eventFilePath, 'w') as eventFile: # noqa: PTH123, N806
json.dump(eventsDict, eventFile, indent=4)
-def main():
+def main(): # noqa: D103
# Input Argument Specifications
- gmArgsParser = argparse.ArgumentParser(
+ gmArgsParser = argparse.ArgumentParser( # noqa: N806
'Characterize ground motion using seismic hazard analysis and record selection'
)
gmArgsParser.add_argument(
@@ -122,7 +122,7 @@ def main():
)
# Parse the arguments
- gmArgs = gmArgsParser.parse_args()
+ gmArgs = gmArgsParser.parse_args() # noqa: N806
# Check getRV flag
if not gmArgs.getRV:
@@ -131,37 +131,37 @@ def main():
return 0
# First let's process the arguments
- bimFilePath = gmArgs.filenameAIM
- eventFilePath = gmArgs.filenameEVENT
- gmConfigPath = gmArgs.groundMotions
- recordsFolder = gmArgs.recordsFolder
+ bimFilePath = gmArgs.filenameAIM # noqa: N806
+ eventFilePath = gmArgs.filenameEVENT # noqa: N806
+ gmConfigPath = gmArgs.groundMotions # noqa: N806
+ recordsFolder = gmArgs.recordsFolder # noqa: N806
- with open(gmConfigPath) as gmConfigFile:
- gmConfig = json.load(gmConfigFile)
+ with open(gmConfigPath) as gmConfigFile: # noqa: PTH123, N806
+ gmConfig = json.load(gmConfigFile) # noqa: N806
# We need to read the building location
- with open(bimFilePath) as bimFile:
+ with open(bimFilePath) as bimFile: # noqa: PTH123, N806
bim = json.load(bimFile)
location = [
bim['GI']['location']['latitude'],
bim['GI']['location']['longitude'],
]
- siteLocations = []
+ siteLocations = [] # noqa: N806
for gm in gmConfig['GroundMotion']:
- siteLocations.append(
+ siteLocations.append( # noqa: PERF401
[gm['Location']['Latitude'], gm['Location']['Longitude']]
)
# we need to find the nearest neighbor
- sitesTree = spatial.KDTree(siteLocations)
+ sitesTree = spatial.KDTree(siteLocations) # noqa: N806
nearest = sitesTree.query(location)
- nearestGM = gmConfig['GroundMotion'][nearest[1]]
- h1File = nearestGM['Records']['Horizontal1']
- h2File = nearestGM['Records']['Horizontal2']
+ nearestGM = gmConfig['GroundMotion'][nearest[1]] # noqa: N806
+ h1File = nearestGM['Records']['Horizontal1'] # noqa: N806
+ h2File = nearestGM['Records']['Horizontal2'] # noqa: N806
- createEvent(os.path.abspath(recordsFolder), h1File, h2File, eventFilePath)
+ createEvent(os.path.abspath(recordsFolder), h1File, h2File, eventFilePath) # noqa: RET503, PTH100
if __name__ == '__main__':
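RET503 on the `createEvent(...)` call above notes that `main()` returns 0 when `--getRV` is absent but implicitly returns None otherwise. A sketch of the uniform contract:

```python
# Hypothetical sketch, not part of this diff: give every path an explicit
# return value (RET503).
def main(get_rv=False):  # stand-in for the parsed --getRV flag
    if not get_rv:
        return 0
    ...  # select records and write the event file
    return 0
```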
diff --git a/modules/createEVENT/NonisolatedLowRiseTPU/NonIsolatedLowRiseTPU.py b/modules/createEVENT/NonisolatedLowRiseTPU/NonIsolatedLowRiseTPU.py
index 8aea72fc7..011ee537f 100644
--- a/modules/createEVENT/NonisolatedLowRiseTPU/NonIsolatedLowRiseTPU.py
+++ b/modules/createEVENT/NonisolatedLowRiseTPU/NonIsolatedLowRiseTPU.py
@@ -1,4 +1,4 @@
-# python code to open the TPU .mat file
+# python code to open the TPU .mat file # noqa: INP001, D100
# and put data into a SimCenter JSON file for
# wind tunnel data
@@ -7,21 +7,21 @@
import scipy.io as sio
-inputArgs = sys.argv
+inputArgs = sys.argv # noqa: N816
-print('Number of arguments: %d' % len(sys.argv))
-print('The arguments are: %s' % str(sys.argv))
+print('Number of arguments: %d' % len(sys.argv)) # noqa: T201
+print('The arguments are: %s' % str(sys.argv)) # noqa: T201, UP031
# set filenames
-matFileIN = sys.argv[1]
-jsonFileOUT = sys.argv[2]
+matFileIN = sys.argv[1] # noqa: N816
+jsonFileOUT = sys.argv[2] # noqa: N816
-dataDir = os.getcwd()
-scriptDir = os.path.dirname(os.path.realpath(__file__))
+dataDir = os.getcwd() # noqa: PTH109, N816
+scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
-def parseTPU_LowRise_MatFile(matFileIn, windFileOutName):
- file = open(windFileOutName, 'w')
+def parseTPU_LowRise_MatFile(matFileIn, windFileOutName): # noqa: C901, N802, N803, D103
+ file = open(windFileOutName, 'w') # noqa: SIM115, PTH123
file.write('{\n')
mat_contents = sio.loadmat(matFileIn)
breadth = mat_contents['Building_breadth'][0][0]
@@ -32,56 +32,56 @@ def parseTPU_LowRise_MatFile(matFileIn, windFileOutName):
period = mat_contents['Sample_period'][0][0]
frequency = mat_contents['Sample_frequency'][0][0]
angle = mat_contents['Wind_azimuth'][0][0]
- roofType = mat_contents['Roof_type'][0]
+ roofType = mat_contents['Roof_type'][0] # noqa: N806
if roofType == 'flat roof':
- roofType = 'Flat'
+ roofType = 'Flat' # noqa: N806
elif roofType == 'gable roof':
- roofType = 'Gable'
+ roofType = 'Gable' # noqa: N806
file.write('"roofType":"' + roofType + '",')
- file.write('"windSpeed":%f,' % 22.0)
- file.write('"depth":%f,' % depth)
- file.write('"height":%f,' % height)
- file.write('"breadth":%f,' % breadth)
- file.write('"pitch":%f,' % pitch)
- file.write('"period":%f,' % period)
+ file.write('"windSpeed":%f,' % 22.0) # noqa: UP031
+ file.write('"depth":%f,' % depth) # noqa: UP031
+ file.write('"height":%f,' % height) # noqa: UP031
+ file.write('"breadth":%f,' % breadth) # noqa: UP031
+ file.write('"pitch":%f,' % pitch) # noqa: UP031
+ file.write('"period":%f,' % period) # noqa: UP031
file.write('"units":{"length":"m","time":"sec"},')
- file.write('"frequency":%f,' % frequency)
- file.write('"incidenceAngle":%f,' % angle)
+ file.write('"frequency":%f,' % frequency) # noqa: UP031
+ file.write('"incidenceAngle":%f,' % angle) # noqa: UP031
file.write('"tapLocations": [')
locations = mat_contents['Location_of_measured_points']
- numLocations = locations.shape[1]
+ numLocations = locations.shape[1] # noqa: N806
    # get xMax and yMax ... assuming first sensor is 1m from building edge
    # location on faces cannot be obtained from the inputs, at least not with
    # current documentation, awaiting email from TPU
- xMax = max(locations[0]) + 1
- yMax = max(locations[1]) + 1
+ xMax = max(locations[0]) + 1 # noqa: N806
+ yMax = max(locations[1]) + 1 # noqa: N806
for loc in range(numLocations):
tag = locations[2][loc]
- xLoc = locations[0][loc]
- yLoc = locations[1][loc]
+ xLoc = locations[0][loc] # noqa: N806
+ yLoc = locations[1][loc] # noqa: N806
face = locations[3][loc]
if roofType == 'Flat':
- X = xLoc
- Y = yLoc
+ X = xLoc # noqa: N806
+ Y = yLoc # noqa: N806
if face == 1:
- xLoc = -(Y - breadth / 2.0)
- yLoc = X + xMax
- elif face == 2:
- xLoc = X + depth / 2.0
- yLoc = Y + yMax
- elif face == 3:
- xLoc = Y + breadth / 2.0
- yLoc = -(X - xMax)
- elif face == 4:
- xLoc = -(X - depth / 2.0)
- yLoc = -(Y - yMax)
+ xLoc = -(Y - breadth / 2.0) # noqa: N806
+ yLoc = X + xMax # noqa: N806
+ elif face == 2: # noqa: PLR2004
+ xLoc = X + depth / 2.0 # noqa: N806
+ yLoc = Y + yMax # noqa: N806
+ elif face == 3: # noqa: PLR2004
+ xLoc = Y + breadth / 2.0 # noqa: N806
+ yLoc = -(X - xMax) # noqa: N806
+ elif face == 4: # noqa: PLR2004
+ xLoc = -(X - depth / 2.0) # noqa: N806
+ yLoc = -(Y - yMax) # noqa: N806
else:
- xLoc = X + depth / 2
- yLoc = Y + breadth / 2
+ xLoc = X + depth / 2 # noqa: N806
+ yLoc = Y + breadth / 2 # noqa: N806
if loc == numLocations - 1:
file.write(
@@ -94,16 +94,16 @@ def parseTPU_LowRise_MatFile(matFileIn, windFileOutName):
file.write(',"pressureCoefficients": [')
coefficients = mat_contents['Wind_pressure_coefficients']
- numLocations = coefficients.shape[1]
- numValues = coefficients.shape[0]
+ numLocations = coefficients.shape[1] # noqa: N806
+ numValues = coefficients.shape[0] # noqa: N806
for loc in range(numLocations):
file.write('{"id": %d , "data":[' % (loc + 1))
for i in range(numValues - 1):
- file.write('%f,' % coefficients[i, loc])
+ file.write('%f,' % coefficients[i, loc]) # noqa: UP031
if loc != numLocations - 1:
- file.write('%f]},' % coefficients[numValues - 1, loc])
+ file.write('%f]},' % coefficients[numValues - 1, loc]) # noqa: UP031
else:
- file.write('%f]}]' % coefficients[numValues - 1, loc])
+ file.write('%f]}]' % coefficients[numValues - 1, loc]) # noqa: UP031
file.write('}')
file.close()
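UP031 throughout this file marks printf-style `%` formatting, and PLR2004 marks the bare face numbers 2, 3, 4. A hedged sketch of both fixes:

```python
# Hypothetical sketch, not part of this diff: f-strings replace the `%`
# interpolation (UP031) and named constants replace the magic face
# numbers (PLR2004).
FACE_FRONT, FACE_SIDE, FACE_BACK = 2, 3, 4  # assumed meaning of the codes

depth, height, breadth = 24.0, 12.0, 16.0   # sample dimensions
record = f'"depth":{depth:f},"height":{height:f},"breadth":{breadth:f},'
```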
diff --git a/modules/createEVENT/SimCenterEvent/SimCenterEvent.py b/modules/createEVENT/SimCenterEvent/SimCenterEvent.py
index 36ca9bd19..baf660934 100644
--- a/modules/createEVENT/SimCenterEvent/SimCenterEvent.py
+++ b/modules/createEVENT/SimCenterEvent/SimCenterEvent.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,21 +46,21 @@
import numpy as np
# import the common constants and methods
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import get_scale_factors, get_unit_bases
+from simcenter_common import get_scale_factors, get_unit_bases # noqa: E402
-def write_RV(AIM_file, EVENT_file):
+def write_RV(AIM_file, EVENT_file): # noqa: N802, N803, D103
# load the AIM file to get information about the assigned events
- with open(AIM_file, encoding='utf-8') as f:
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
aim_file = json.load(f)
input_units = None
- if 'RegionalEvent' in aim_file.keys():
+ if 'RegionalEvent' in aim_file.keys(): # noqa: SIM118
input_units = aim_file['RegionalEvent'].get('units', None)
output_units = aim_file.get('units', None)
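SIM118 above fires because membership tests work on the mapping itself; `.keys()` adds nothing. A one-line sketch:

```python
# Hypothetical sketch, not part of this diff.
aim_file = {'RegionalEvent': {'units': {'length': 'm'}}}  # sample input

input_units = None
if 'RegionalEvent' in aim_file:  # instead of `in aim_file.keys()`
    input_units = aim_file['RegionalEvent'].get('units')
```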
@@ -72,7 +72,7 @@ def write_RV(AIM_file, EVENT_file):
input_unit_bases = get_unit_bases(input_units)
# get the location of the event input files
- # TODO: assuming a single event for now
+ # TODO: assuming a single event for now # noqa: TD002
aim_event_input = aim_file['Events'][0]
data_dir = Path(aim_event_input['EventFolderPath'])
@@ -110,7 +110,7 @@ def write_RV(AIM_file, EVENT_file):
)
# collect the filenames
- RV_elements = np.array(events).T[0].tolist()
+ RV_elements = np.array(events).T[0].tolist() # noqa: N806
# for event in events:
# #if event['EventClassification'] in ['Earthquake', 'Hurricane',
# # 'Flood']:
@@ -136,7 +136,7 @@ def write_RV(AIM_file, EVENT_file):
)
# if time histories are used, then load the first event
- # TODO: this is needed by some other code that should be fixed and this
+ # TODO: this is needed by some other code that should be fixed and this # noqa: TD002
# part should be removed.
if aim_event_input['type'] == 'timeHistory':
@@ -146,16 +146,16 @@ def write_RV(AIM_file, EVENT_file):
# , event_class = event_class))
# save the EVENT dictionary to a json file
- with open(EVENT_file, 'w', encoding='utf-8') as f:
+ with open(EVENT_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
-def load_record(
+def load_record( # noqa: D103
file_name,
data_dir,
f_scale_user=1.0,
- f_scale_units={'ALL': 1.0},
- empty=False,
+ f_scale_units={'ALL': 1.0}, # noqa: B006
+ empty=False, # noqa: FBT002
):
# event_class=None):
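B006 on `f_scale_units={'ALL': 1.0}` above is the classic mutable-default trap: the dict is created once at definition time and shared by every call, so a mutation in one call leaks into the next. The usual None-sentinel fix, sketched:

```python
# Hypothetical sketch, not part of this diff.
def load_record(file_name, data_dir, f_scale_user=1.0, f_scale_units=None):
    if f_scale_units is None:
        f_scale_units = {'ALL': 1.0}  # fresh dict on every call
    ...
```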
@@ -167,22 +167,22 @@ def load_record(
# open the input event data file
# (SimCenter json format is assumed here)
- with open(data_dir / f'{file_name}.json', encoding='utf-8') as f:
+ with open(data_dir / f'{file_name}.json', encoding='utf-8') as f: # noqa: PTH123
event_data = json.load(f)
# check if Event File is already in EVENT format
- isEventFile = False
+ isEventFile = False # noqa: N806
if event_data.__contains__('Events'):
event_dic = event_data['Events'][0]
# event_dic['dT'] = event_data['Events'][0]['dT']
# event_dic['numSteps'] = event_data['Events'][0]['numSteps']
# event_dic['timeSeries'] = event_data['Events'][0]['timeSeries']
# event_dic['pattern'] = event_data['Events'][0]['pattern']
- return event_dic
+ return event_dic # noqa: RET504
- isEventFile = True
+ isEventFile = True # noqa: N806
- else:
+ else: # noqa: RET505
# initialize the internal EVENT file structure
event_dic = {
'name': file_name,
@@ -195,18 +195,18 @@ def load_record(
if not isEventFile:
f_scale_units = f_scale_units.get('TH_file', f_scale_units.get('ALL', None))
if f_scale_units is None:
- raise ValueError('No unit scaling is defined for time history data.')
+ raise ValueError('No unit scaling is defined for time history data.') # noqa: EM101, TRY003
f_scale = float(f_scale_units) * float(f_scale_user)
# generate the event files
- # TODO: add 'z' later
+ # TODO: add 'z' later # noqa: TD002
for i, dir_ in enumerate(['x', 'y']):
src_label = 'data_' + dir_
tar_label = src_label
# if there is data in the given direction in the input file
- if src_label in event_data.keys():
+ if src_label in event_data.keys(): # noqa: SIM118
# then load that data into the output EVENT file and scale it
event_dic['timeSeries'].append(
{
@@ -221,7 +221,7 @@ def load_record(
if empty:
event_dic['timeSeries'][-1]['data'] = []
- # TODO: We will need to generalize this as soon as we add
+ # TODO: We will need to generalize this as soon as we add # noqa: TD002
# different types of time histories
# Assuming acceleration time history for now.
event_dic['pattern'].append(
@@ -235,17 +235,17 @@ def load_record(
return event_dic
-def get_records(AIM_file, EVENT_file):
+def get_records(AIM_file, EVENT_file): # noqa: N803
"""This function is only called if UQ is part of the workflow. That is, it is
not called if we are using IMasEDP and skipping the response simulation.
- """
+ """ # noqa: D205, D401, D404
# load the AIM file
- with open(AIM_file, encoding='utf-8') as f:
- AIM_file = json.load(f)
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
+ AIM_file = json.load(f) # noqa: N806
# load the EVENT file
- with open(EVENT_file, encoding='utf-8') as f:
+ with open(EVENT_file, encoding='utf-8') as f: # noqa: PTH123
event_file = json.load(f)
# event_class = AIM_file['Events']['Events'][0]['EventClassification']
@@ -275,7 +275,7 @@ def get_records(AIM_file, EVENT_file):
) # , event_class = event_class))
# save the updated EVENT file
- with open(EVENT_file, 'w', encoding='utf-8') as f:
+ with open(EVENT_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
diff --git a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
index 4660c155f..b331ebbec 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/SurroundedBuildingCFD.py
@@ -1,25 +1,25 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -31,10 +31,10 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- patternsArray = []
- windEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ patternsArray = [] # noqa: N806
+ windEventJson = { # noqa: N806
'type': 'Wind',
'subtype': 'SurroundedBuildingCFD',
'pattern': patternsArray,
@@ -44,20 +44,20 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [windEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [windEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
- with open(eventFilePath, 'w') as eventsFile:
+ with open(eventFilePath, 'w') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath) as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath) as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -78,11 +78,11 @@ def GetFloorsCount(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# write the event file
writeEVENT(forces, arguments.filenameEVENT)
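B007 and PERF401 above both point at the floor-forces loop: the index `i` is never used and the list is built by repeated `append`. A comprehension addresses both; a sketch with a stand-in class:

```python
# Hypothetical sketch, not part of this diff.
class FloorForces:  # stand-in matching the class defined above
    def __init__(self):
        self.X, self.Y, self.Z = [0], [0], [0]

floors_count = 3  # sample value
forces = [FloorForces() for _ in range(floors_count)]
```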
diff --git a/modules/createEVENT/SurroundedBuildingCFD/foam_file_processor.py b/modules/createEVENT/SurroundedBuildingCFD/foam_file_processor.py
index 946fc6916..58399274d 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/foam_file_processor.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/foam_file_processor.py
@@ -1,4 +1,4 @@
-# This script contains functions for reading and writing
+# This script contains functions for reading and writing # noqa: INP001, D100
# OpenFOAM dictionaries and files.
#
import os
@@ -6,18 +6,18 @@
import numpy as np
-def find_keyword_line(dict_lines, keyword):
+def find_keyword_line(dict_lines, keyword): # noqa: D103
start_line = -1
count = 0
for line in dict_lines:
- l = line.lstrip(' ')
+ l = line.lstrip(' ') # noqa: E741
if l.startswith(keyword):
start_line = count
break
- count += 1
+ count += 1 # noqa: SIM113
return start_line
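SIM113 above suggests `enumerate()` in place of the manual counter, which also retires the ambiguous `l` that E741 flags. A sketch of the whole function:

```python
# Hypothetical sketch, not part of this diff: enumerate() supplies the
# index (SIM113) and the stripped line keeps an unambiguous name (E741).
def find_keyword_line(dict_lines, keyword):
    for line_no, line in enumerate(dict_lines):
        if line.lstrip(' ').startswith(keyword):
            return line_no
    return -1  # keyword not found, matching the original sentinel

print(find_keyword_line(['foo\n', '  bar baz\n'], 'bar'))  # -> 1
```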
@@ -29,11 +29,11 @@ def write_foam_field(field, file_name):
vectorField,
tensorField,
symmTensorField
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D400, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
@@ -54,11 +54,11 @@ def write_foam_field(field, file_name):
def write_scalar_field(field, file_name):
"""Writes a given one dimensional numpy array to OpenFOAM
scalar field format.
- """
- if os.path.exists(file_name):
- os.remove(file_name)
+ """ # noqa: D205, D401
+ if os.path.exists(file_name): # noqa: PTH110
+ os.remove(file_name) # noqa: PTH107
- foam_file = open(file_name, 'w+')
+ foam_file = open(file_name, 'w+') # noqa: SIM115, PTH123
size = np.shape(field)
diff --git a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
index 7ec4f39ac..4021a5915 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/post_process_output.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017, The Regents of the University of California (Regents).
+# Copyright (c) 2016-2017, The Regents of the University of California (Regents). # noqa: INP001, D100
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -54,30 +54,30 @@
from scipy import signal
-def readPressureProbes(fileName):
+def readPressureProbes(fileName): # noqa: N802, N803
"""Created on Wed May 16 14:31:42 2018
    Reads pressure probe data from OpenFOAM and returns the probe location, time, and the pressure
for each time step.
@author: Abiy
- """
+ """ # noqa: D400, D401
probes = []
p = []
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.split()
+ line = line.split() # noqa: PLW2901
time.append(float(line[0]))
p_probe_i = np.zeros([len(probes)])
for i in range(len(probes)):
@@ -106,7 +106,7 @@ def read_pressure_data(file_names):
time, pressure
Returns the pressure time and pressure data of the connected file.
- """
+ """ # noqa: D205, D401, D404
no_files = len(file_names)
connected_time = [] # Connected array of time
connected_p = [] # connected array of pressure.
@@ -128,7 +128,7 @@ def read_pressure_data(file_names):
index = np.where(time2 > time1[-1])[0][0]
# index += 1
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
            index = 0  # Join them even if they have a time gap
@@ -136,7 +136,7 @@ def read_pressure_data(file_names):
connected_p = np.concatenate((connected_p, p2[index:]))
time1 = time2
- p1 = p2
+ p1 = p2 # noqa: F841
return probes, connected_time, connected_p
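
The PLW2901 suppressions mark the loop variable line being rebound while parsing the '# Probe' headers; binding a new name sidesteps the rule. A sketch of that parsing step as a hypothetical helper:

    def parse_probe_header(line):
        """Extract [x, y, z] from a '# Probe 0 (x y z)' header line."""
        tokens = line.replace('(', '').replace(')', '').split()
        # tokens: ['#', 'Probe', '0', x, y, z]; indices 3:6 are the coordinates
        return [float(tokens[3]), float(tokens[4]), float(tokens[5])]
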
@@ -144,7 +144,7 @@ class PressureData:
"""A class that holds a pressure data and performs the following operations:
- mean and rms pressure coefficients
- peak pressure coefficients
- """
+ """ # noqa: D205, D400
def __init__(
self,
@@ -172,8 +172,8 @@ def __init__(
self.probe_count = np.shape(self.probes)[0]
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
# print(sorted_index)
@@ -181,7 +181,7 @@ def __read_cfd_data(self):
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'p') # noqa: PTH118
file_names.append(file_name)
# print(file_names)
@@ -190,30 +190,30 @@ def __read_cfd_data(self):
# self.p = np.transpose(self.p) # OpenFOAM gives p/rho
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
# self.cp = self.cp[:,start_index:]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, start_index:]
- except:
+ except: # noqa: S110, E722
pass
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
# self.cp = self.cp[:,:end_index]
- try:
+ try: # noqa: SIM105
self.p = self.p[:, :end_index]
- except:
+ except: # noqa: S110, E722
pass
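
The E711 and SIM105 suppressions in __set_time can be resolved with identity comparison and contextlib.suppress; a sketch of the method body (assumes numpy as np in scope, and stays as broad as the original bare except):

    import contextlib

    def __set_time(self):
        if self.start_time is not None:  # E711: test None by identity
            start_index = int(np.argmax(self.time > self.start_time))
            self.time = self.time[start_index:]
            with contextlib.suppress(Exception):  # SIM105: replaces try/except/pass
                self.p = self.p[:, start_index:]
        if self.end_time is not None:
            end_index = int(np.argmax(self.time > self.end_time))
            self.time = self.time[:end_index]
            with contextlib.suppress(Exception):
                self.p = self.p[:, :end_index]
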
-def von_karman_spectrum(f, Uav, I, L, comp=0):
- psd = np.zeros(len(f))
+def von_karman_spectrum(f, Uav, I, L, comp=0): # noqa: N803, E741, D103
+ psd = np.zeros(len(f)) # noqa: F841
if comp == 0:
return (
@@ -223,7 +223,7 @@ def von_karman_spectrum(f, Uav, I, L, comp=0):
/ np.power(1.0 + 70.8 * np.power(f * L / Uav, 2.0), 5.0 / 6.0)
)
- if comp == 1 or comp == 2:
+ if comp == 1 or comp == 2: # noqa: RET503, PLR1714, PLR2004
return (
4.0
* np.power(I * Uav, 2.0)
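
For reference, the comp == 0 branch implements the standard longitudinal von Karman spectrum,

    S_u(f) = 4 (I * Uav)^2 (L / Uav) / (1 + 70.8 (f L / Uav)^2)^(5/6),

and the comp == 1 or 2 branch is the usual lateral/vertical counterpart with the same 4 (I * Uav)^2 prefactor and the 11/6-exponent denominator form. Note the F841 on psd above: that zero array is never used because each branch returns directly.
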
@@ -251,7 +251,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -264,8 +264,8 @@ def write_open_foam_vector_field(p, file_name):
"""Writes a given vector-field (n x 3) array to OpenFOAM 'vectorField'
format.
- """
- f = open(file_name, 'w+')
+ """ # noqa: D205, D401
+ f = open(file_name, 'w+') # noqa: SIM115, PTH123
f.write('%d' % len(p[:, 2]))
f.write('\n(')
for i in range(len(p[:, 2])):
@@ -275,58 +275,58 @@ def write_open_foam_vector_field(p, file_name):
f.close()
-def read_openFoam_scalar_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- sField = []
+def read_openFoam_scalar_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ sField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
itrf = iter(f)
next(itrf)
for line in itrf:
- if line.startswith('(') or line.startswith(')'):
+ if line.startswith('(') or line.startswith(')'): # noqa: PIE810
continue
- else:
- line = line.split()
+ else: # noqa: RET507
+ line = line.split() # noqa: PLW2901
sField.append(float(line[0]))
- sField = np.asarray(sField, dtype=np.float32)
+ sField = np.asarray(sField, dtype=np.float32) # noqa: N806
- return sField
+ return sField # noqa: RET504
-def read_openFoam_vector_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_vector_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
- if len(line) < 3:
+ if len(line) < 3: # noqa: PLR2004
continue
vField.append([float(line[0]), float(line[1]), float(line[2])])
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 9
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -338,23 +338,23 @@ def read_openFoam_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
-def read_openFoam_symmetric_tensor_field(file_name):
- """Reads a given vectorField OpenFOAM into numpy (n x 3) array format."""
- vField = []
+def read_openFoam_symmetric_tensor_field(file_name): # noqa: N802
+ """Reads a given vectorField OpenFOAM into numpy (n x 3) array format.""" # noqa: D401
+ vField = [] # noqa: N806
row_count = 6
- with open(file_name) as f:
+ with open(file_name) as f: # noqa: PTH123
for line in f:
if line.startswith('('):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
if len(line) < row_count:
continue
@@ -365,9 +365,9 @@ def read_openFoam_symmetric_tensor_field(file_name):
vField.append(row)
- vField = np.asarray(vField, dtype=np.float32)
+ vField = np.asarray(vField, dtype=np.float32) # noqa: N806
- return vField
+ return vField # noqa: RET504
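
The four read_openFoam_* readers differ only in the expected component count; a hedged generic sketch (read_openfoam_field is hypothetical, assumes the boundaryData layout of an element count followed by '(' rows ')', and ignores any component reordering the specialized readers apply):

    import numpy as np


    def read_openfoam_field(file_name, n_cols):
        """Read an OpenFOAM field file with n_cols components per row.

        n_cols: 1 (scalarField), 3 (vectorField), 6 (symmTensorField), 9 (tensorField).
        """
        rows = []
        with open(file_name) as f:
            next(f)  # skip the element-count line
            for line in f:
                parts = line.replace('(', ' ').replace(')', ' ').split()
                if len(parts) < n_cols:
                    continue  # skips the bare '(' and ')' delimiter lines
                rows.append([float(v) for v in parts[:n_cols]])
        return np.asarray(rows, dtype=np.float32)
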
def read_velocity_data(path):
@@ -385,35 +385,35 @@ def read_velocity_data(path):
    time, velocity
Returns the velocity time and velocity data of the connected file.
- """
+ """ # noqa: D205, D401, D404
num_files = len(path)
connected_time = [] # Connected array of time
- connected_U = [] # connected array of pressure.
+    connected_U = []  # Connected array of velocity.  # noqa: N806
time1 = []
- U1 = []
+ U1 = [] # noqa: N806
time2 = []
- U2 = []
+ U2 = [] # noqa: N806
probes = []
for i in range(num_files):
- probes, time2, U2 = read_velocity_probes(path[i])
+ probes, time2, U2 = read_velocity_probes(path[i]) # noqa: N806
if i != 0:
try:
index = np.where(time2 > time1[-1])[0][0]
- except:
+ except: # noqa: E722
# sys.exit('Fatal Error!: the pressure files have time gap')
index = 0 # Join them even if they have a time gap
connected_time = np.concatenate((connected_time, time2[index:]))
- connected_U = np.concatenate((connected_U, U2[index:]))
+ connected_U = np.concatenate((connected_U, U2[index:])) # noqa: N806
else:
connected_time = time2
- connected_U = U2
+ connected_U = U2 # noqa: N806
time1 = time2
- U1 = U2
+ U1 = U2 # noqa: N806, F841
shape = np.shape(connected_U)
- U = np.zeros((shape[1], shape[2], shape[0]))
+ U = np.zeros((shape[1], shape[2], shape[0])) # noqa: N806
for i in range(shape[1]):
for j in range(shape[2]):
@@ -421,33 +421,33 @@ def read_velocity_data(path):
return probes, connected_time, U
-def read_velocity_probes(fileName):
+def read_velocity_probes(fileName): # noqa: N803
"""Created on Wed May 16 14:31:42 2018
    Reads velocity probe data from OpenFOAM and returns the probe location, time,
and the velocity vector for each time step.
- """
+ """ # noqa: D400, D401
probes = []
- U = []
+ U = [] # noqa: N806
time = []
- with open(fileName) as f:
+ with open(fileName) as f: # noqa: PTH123
for line in f:
if line.startswith('#'):
if line.startswith('# Probe'):
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
probes.append([float(line[3]), float(line[4]), float(line[5])])
else:
continue
else:
- line = line.replace('(', '')
- line = line.replace(')', '')
- line = line.split()
+ line = line.replace('(', '') # noqa: PLW2901
+ line = line.replace(')', '') # noqa: PLW2901
+ line = line.split() # noqa: PLW2901
try:
time.append(float(line[0]))
- except:
+ except: # noqa: S112, E722
continue
u_probe_i = np.zeros([len(probes), 3])
for i in range(len(probes)):
@@ -460,13 +460,13 @@ def read_velocity_probes(fileName):
probes = np.asarray(probes, dtype=np.float32)
time = np.asarray(time, dtype=np.float32)
- U = np.asarray(U, dtype=np.float32)
+ U = np.asarray(U, dtype=np.float32) # noqa: N806
return probes, time, U
def calculate_length_scale(u, uav, dt, min_corr=0.0):
- """Calculates the length scale of a velocity time history given."""
+ """Calculates the length scale of a velocity time history given.""" # noqa: D401
u = u - np.mean(u)
corr = signal.correlate(u, u, mode='full')
@@ -479,12 +479,12 @@ def calculate_length_scale(u, uav, dt, min_corr=0.0):
corr = corr[:loc]
- L = uav * np.trapz(corr, dx=dt)
+ L = uav * np.trapz(corr, dx=dt) # noqa: NPY201, N806
- return L
+ return L # noqa: RET504
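
The NPY201 suppression flags np.trapz, which NumPy 2.0 removes in favor of np.trapezoid; a small compatibility shim (a sketch that works on either major version; integral_length_scale is a hypothetical wrapper):

    import numpy as np

    # np.trapz was renamed to np.trapezoid in NumPy 2.0 (NPY201); resolve lazily so
    # neither attribute is touched on a version that lacks it.
    _trapezoid = getattr(np, 'trapezoid', None) or np.trapz

    def integral_length_scale(corr, uav, dt):
        """L = uav * integral of the truncated autocorrelation, as in calculate_length_scale."""
        return uav * _trapezoid(corr, dx=dt)
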
-def psd(x, dt, nseg):
+def psd(x, dt, nseg): # noqa: F811
"""Calculates the power spectral density of a given signal using the welch
method.
@@ -502,7 +502,7 @@ def psd(x, dt, nseg):
freq, spectra
Returns the frequency and spectra of the signal
- """
+ """ # noqa: D205, D401
x_no_mean = x - np.mean(x)
freq, spectra = signal.welch(
x_no_mean, fs=1.0 / dt, nperseg=len(x_no_mean) / nseg
@@ -516,13 +516,13 @@ class VelocityData:
- mean velocity profile
- turbulence intensity profiles
- integral scale of turbulence profiles
- """
+ """ # noqa: D205, D400
def __init__(
self,
path,
sampling_rate=400,
- filter_data=False,
+ filter_data=False, # noqa: FBT002
filter_freq=400,
start_time=None,
end_time=None,
@@ -551,14 +551,14 @@ def __init__(
self.__calculate_all()
def __read_cfd_data(self):
- if os.path.isdir(self.path):
- print('Reading from path : %s' % (self.path))
+ if os.path.isdir(self.path): # noqa: PTH112
+ print('Reading from path : %s' % (self.path)) # noqa: T201, UP031
time_names = os.listdir(self.path)
sorted_index = np.argsort(np.float64(time_names)).tolist()
file_names = []
for i in range(len(sorted_index)):
- file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U')
+ file_name = os.path.join(self.path, time_names[sorted_index[i]], 'U') # noqa: PTH118
file_names.append(file_name)
self.probes, self.time, self.U = read_velocity_data(file_names)
@@ -576,14 +576,14 @@ def __read_cfd_data(self):
# Coefficient of variation
cv = np.std(np.diff(self.time)) / np.mean(np.diff(self.time))
- if cv > 1.0e-4:
+ if cv > 1.0e-4: # noqa: PLR2004
self.__adjust_time_step()
else:
- print('Cannot find the file path: %s' % (self.path))
+ print('Cannot find the file path: %s' % (self.path)) # noqa: T201, UP031
def __adjust_time_step(self):
- if self.resample_dt == None:
+ if self.resample_dt == None: # noqa: E711
dt = np.mean(np.diff(self.time))
else:
dt = self.resample_dt
@@ -592,7 +592,7 @@ def __adjust_time_step(self):
shape = np.shape(self.U)
- U = np.zeros((shape[0], shape[1], len(time)))
+ U = np.zeros((shape[0], shape[1], len(time))) # noqa: N806
for i in range(shape[0]):
for j in range(shape[1]):
@@ -611,12 +611,12 @@ def __filter_signal(self):
self.U[i, j, :] = signal.sosfilt(low_pass, self.U[i, j, :])
def __set_time(self):
- if self.start_time != None:
+ if self.start_time != None: # noqa: E711
start_index = int(np.argmax(self.time > self.start_time))
self.time = self.time[start_index:]
self.U = self.U[:, :, start_index:]
- if self.end_time != None:
+ if self.end_time != None: # noqa: E711
end_index = int(np.argmax(self.time > self.end_time))
self.time = self.time[:end_index]
self.U = self.U[:, :, :end_index]
@@ -654,7 +654,7 @@ def __calculate_all(self):
self.uv_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 1, :])[0, 1]
self.uw_bar[i] = np.cov(self.U[i, 0, :], self.U[i, 2, :])[0, 1]
- def get_Uav(self, z):
+ def get_Uav(self, z): # noqa: N802, D102
from scipy import interpolate
f = interpolate.interp1d(self.z, self.Uav)
@@ -670,48 +670,48 @@ def copy_vtk_planes_and_order(input_path, output_path, field):
input_path: path of the vtk files in the postProcessing directory
    output_path: path to write the vtk files in order
- """
- if not os.path.isdir(input_path):
- print(f'Cannot find the path for: {input_path}')
+ """ # noqa: D205, D401, D404
+ if not os.path.isdir(input_path): # noqa: PTH112
+ print(f'Cannot find the path for: {input_path}') # noqa: T201
return
- if not os.path.isdir(output_path):
- print(f'Cannot find the path for: {output_path}')
+ if not os.path.isdir(output_path): # noqa: PTH112
+ print(f'Cannot find the path for: {output_path}') # noqa: T201
return
- print(f'Reading from path: {input_path}')
+ print(f'Reading from path: {input_path}') # noqa: T201
time_names = os.listdir(input_path)
times = np.float64(time_names)
sorted_index = np.argsort(times).tolist()
n_times = len(times)
- print(f'\tNumber of time directories: {n_times} ')
- print(f'\tTime step: {np.mean(np.diff(times)):.4f} s')
- print(
+ print(f'\tNumber of time directories: {n_times} ') # noqa: T201
+ print(f'\tTime step: {np.mean(np.diff(times)):.4f} s') # noqa: T201
+ print( # noqa: T201
f'\tTotal duration: {times[sorted_index[-1]] - times[sorted_index[0]]:.4f} s'
)
for i in range(n_times):
index = sorted_index[i]
- pathi = os.path.join(input_path, time_names[index])
+ pathi = os.path.join(input_path, time_names[index]) # noqa: PTH118
os.listdir(pathi)
new_name = f'{field}_T{i + 1:04d}.vtk'
for f in os.listdir(pathi):
if f.endswith('.vtk'):
- new_path = os.path.join(output_path, new_name)
- old_path = os.path.join(pathi, f)
+ new_path = os.path.join(output_path, new_name) # noqa: PTH118
+ old_path = os.path.join(pathi, f) # noqa: PTH118
shutil.copyfile(old_path, new_path)
- print(f'Copied path: {old_path}')
+ print(f'Copied path: {old_path}') # noqa: T201
-def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
+def plot_wind_profiles_and_spectra(case_path, output_path, prof_name): # noqa: D103
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -719,7 +719,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
ref_h = wc_data['referenceHeight']
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = VelocityData(prof_path, start_time=None, end_time=None)
@@ -736,26 +736,26 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
prof_np[:, 8] = prof.L[:, 2]
# Read the target wind profile data
- tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet')
+ tar_path = os.path.join(case_path, 'constant', 'boundaryData', 'inlet') # noqa: PTH118
- tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points'))
- tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U'))
- tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R'))
- tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L'))
+ tar_p = read_openFoam_vector_field(os.path.join(tar_path, 'points')) # noqa: PTH118
+ tar_U = read_openFoam_scalar_field(os.path.join(tar_path, 'U')) # noqa: PTH118, N806
+ tar_R = read_openFoam_symmetric_tensor_field(os.path.join(tar_path, 'R')) # noqa: PTH118, N806
+ tar_L = read_openFoam_tensor_field(os.path.join(tar_path, 'L')) # noqa: PTH118, N806
- tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U)
+ tar_U_ref = np.interp(ref_h, tar_p[:, 2], tar_U) # noqa: N806, F841
- tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U
- tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U
- tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U
+ tar_Iu = np.sqrt(tar_R[:, 0]) / tar_U # noqa: N806
+ tar_Iv = np.sqrt(tar_R[:, 3]) / tar_U # noqa: N806
+ tar_Iw = np.sqrt(tar_R[:, 5]) / tar_U # noqa: N806
tar_uw = tar_R[:, 2]
- tar_Lu = tar_L[:, 0]
- tar_Lv = tar_L[:, 3]
- tar_Lw = tar_L[:, 6]
+ tar_Lu = tar_L[:, 0] # noqa: N806
+ tar_Lv = tar_L[:, 3] # noqa: N806
+ tar_Lw = tar_L[:, 6] # noqa: N806
- tar_I = np.zeros((3, len(tar_Iu)))
- tar_L = np.zeros((3, len(tar_Lu)))
+ tar_I = np.zeros((3, len(tar_Iu))) # noqa: N806
+ tar_L = np.zeros((3, len(tar_Lu))) # noqa: N806
tar_I[0, :] = tar_Iu
tar_I[1, :] = tar_Iv
@@ -788,7 +788,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_U,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -799,7 +799,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 1],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -833,7 +833,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -844,7 +844,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 2],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -877,7 +877,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -888,7 +888,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 3],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -921,7 +921,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Iw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -932,7 +932,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 4],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -965,7 +965,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_uw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -976,7 +976,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 5],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1009,7 +1009,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lu,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1020,7 +1020,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 6],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1053,7 +1053,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lv,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1064,7 +1064,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 7],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1097,7 +1097,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=tar_Lw,
y=tar_p[:, 2],
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target',
),
@@ -1108,7 +1108,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=prof_np[:, 8],
y=prof_np[:, 0],
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1139,7 +1139,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=850, width=1200, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, prof_name + '.html'),
+ os.path.join(output_path, prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1170,8 +1170,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
vertical_spacing=0.15,
)
- U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1])
- U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U)
+ U_ref_prof = np.interp(spec_h[i], prof_np[:, 0], prof_np[:, 1]) # noqa: N806
+ U_ref_tar = np.interp(spec_h[i], tar_p[:, 2], tar_U) # noqa: N806
# Plot each component
for j in range(ncomp):
@@ -1185,8 +1185,8 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
spec = freq * spec / u_var
freq = freq * spec_h[i] / U_ref_prof
- tar_Iz = tar_I[j, loc_tar]
- tar_Lz = tar_L[j, loc_tar]
+ tar_Iz = tar_I[j, loc_tar] # noqa: N806
+ tar_Lz = tar_L[j, loc_tar] # noqa: N806
vonk_f = np.logspace(np.log10(f_min), np.log10(f_max), 200)
vonk_psd = von_karman_spectrum(vonk_f, U_ref_tar, tar_Iz, tar_Lz, j)
@@ -1198,7 +1198,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=freq,
y=spec,
- line=dict(color='firebrick', width=1.5),
+ line=dict(color='firebrick', width=1.5), # noqa: C408
mode='lines',
name=prof_name,
),
@@ -1209,7 +1209,7 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
go.Scatter(
x=vonk_f,
y=vonk_psd,
- line=dict(color='black', width=3.0, dash='dot'),
+ line=dict(color='black', width=3.0, dash='dot'), # noqa: C408
mode='lines',
name='Target(von Karman)',
),
@@ -1240,15 +1240,15 @@ def plot_wind_profiles_and_spectra(case_path, output_path, prof_name):
fig.update_layout(height=450, width=1500, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(
+ os.path.join( # noqa: PTH118
output_path, 'spectra_' + prof_name + '_H' + str(1 + i) + '.html'
),
include_mathjax='cdn',
)
-def plot_pressure_profile(case_path, output_path, prof_name):
- prof_path = os.path.join(case_path, 'postProcessing', prof_name)
+def plot_pressure_profile(case_path, output_path, prof_name): # noqa: D103
+ prof_path = os.path.join(case_path, 'postProcessing', prof_name) # noqa: PTH118
prof = PressureData(
prof_path, start_time=1.0, end_time=None, u_ref=0.0, rho=1.25, p_ref=0.0
@@ -1271,7 +1271,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
go.Scatter(
x=prof.x - np.min(prof.x),
y=std_p,
- line=dict(color='firebrick', width=2.5),
+ line=dict(color='firebrick', width=2.5), # noqa: C408
mode='lines+markers',
name=prof_name,
),
@@ -1303,7 +1303,7 @@ def plot_pressure_profile(case_path, output_path, prof_name):
fig.update_layout(height=400, width=800, title_text='', showlegend=False)
fig.show()
fig.write_html(
- os.path.join(output_path, 'pressure_' + prof_name + '.html'),
+ os.path.join(output_path, 'pressure_' + prof_name + '.html'), # noqa: PTH118
include_mathjax='cdn',
)
@@ -1325,15 +1325,15 @@ def plot_pressure_profile(case_path, output_path, prof_name):
case_path = arguments.case
- print('Case full path: ', case_path)
+ print('Case full path: ', case_path) # noqa: T201
# prof_name = sys.argv[2]
# Read JSON data
- json_path = os.path.join(
+ json_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'input', 'EmptyDomainCFD.json'
)
- with open(json_path) as json_file:
+ with open(json_path) as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1342,12 +1342,12 @@ def plot_pressure_profile(case_path, output_path, prof_name):
wind_profiles = rm_data['windProfiles']
vtk_planes = rm_data['vtkPlanes']
- prof_output_path = os.path.join(
+ prof_output_path = os.path.join( # noqa: PTH118
case_path, 'constant', 'simCenter', 'output', 'windProfiles'
)
# Check if it exists and remove files
- if os.path.exists(prof_output_path):
+ if os.path.exists(prof_output_path): # noqa: PTH110
shutil.rmtree(prof_output_path)
# Create new path
@@ -1357,8 +1357,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
for prof in wind_profiles:
name = prof['name']
field = prof['field']
- print(name)
- print(field)
+ print(name) # noqa: T201
+ print(field) # noqa: T201
if field == 'Velocity':
plot_wind_profiles_and_spectra(case_path, prof_output_path, name)
@@ -1371,8 +1371,8 @@ def plot_pressure_profile(case_path, output_path, prof_name):
name = pln['name']
field = pln['field']
- vtk_path = os.path.join(case_path, 'postProcessing', name)
- vtk_path_renamed = os.path.join(
+ vtk_path = os.path.join(case_path, 'postProcessing', name) # noqa: PTH118
+ vtk_path_renamed = os.path.join( # noqa: PTH118
case_path, 'postProcessing', name + '_renamed'
)
@@ -1381,5 +1381,5 @@ def plot_pressure_profile(case_path, output_path, prof_name):
copy_vtk_planes_and_order(vtk_path, vtk_path_renamed, field)
# Check if it exists and remove files
- if os.path.exists(vtk_path):
+ if os.path.exists(vtk_path): # noqa: PTH110
shutil.rmtree(vtk_path)
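
The many C408 suppressions throughout the plotting code come from dict(...) constructor calls inside go.Scatter; a dict literal satisfies the rule. A minimal sketch with a toy trace (not data from this patch):

    import plotly.graph_objects as go

    fig = go.Figure()
    fig.add_trace(
        go.Scatter(
            x=[0.0, 1.0],
            y=[0.0, 1.0],
            line={'color': 'firebrick', 'width': 2.5},  # literal, not dict() (C408)
            mode='lines+markers',
            name='profile',
        )
    )
    fig.write_html('profile.html', include_mathjax='cdn')
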
diff --git a/modules/createEVENT/SurroundedBuildingCFD/setup_case.py b/modules/createEVENT/SurroundedBuildingCFD/setup_case.py
index aa93fc29f..62950c3a4 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/setup_case.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/setup_case.py
@@ -1,7 +1,7 @@
"""This script writes BC and initial condition, and setups the OpenFoam case
directory.
-"""
+""" # noqa: INP001, D205, D404
import json
import os
@@ -12,7 +12,7 @@
from stl import mesh
-def create_building_geometry(width, depth, height, center):
+def create_building_geometry(width, depth, height, center): # noqa: D103
epsilon = 0.001 * min(width, depth, height)
# Define the 8 vertices of the building
@@ -58,7 +58,7 @@ def create_building_geometry(width, depth, height, center):
return bldg
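
create_building_geometry assembles a numpy-stl mesh from vertices and triangular faces; a self-contained sketch of that pattern, reduced to a single two-triangle quad (toy geometry, not the building):

    import numpy as np
    from stl import mesh

    # One square face as two triangles.
    vertices = np.array(
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    )
    faces = np.array([[0, 1, 2], [0, 2, 3]])

    quad = mesh.Mesh(np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype))
    for i, tri in enumerate(faces):
        quad.vectors[i] = vertices[tri]  # each facet is a 3x3 array of vertex coordinates

    quad.save('face.stl')  # same save() call used for building.stl below
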
-def create_surroundings_geometry(
+def create_surroundings_geometry( # noqa: D103
main_bldg_width,
main_bldg_depth,
sur_bldg_width,
@@ -106,7 +106,7 @@ def create_surroundings_geometry(
center_y = -y_max / 2.0 + iy * street_width_y + plan_y * (iy + 0.5)
# bldg_R = np.sqrt((abs(center_x) + sur_bldg_depth)**2.0 + (abs(center_y) + sur_bldg_width)**2.0)
- bldg_R = np.sqrt(center_x**2.0 + center_y**2.0)
+ bldg_R = np.sqrt(center_x**2.0 + center_y**2.0) # noqa: N806
# Add the building if it's within bounding radius
if bldg_R < bound_radius:
@@ -132,12 +132,12 @@ def create_surroundings_geometry(
# print(combined.is_closed())
- return combined
+ return combined # noqa: RET504
-def write_main_building_stl_file(input_json_path, case_path):
+def write_main_building_stl_file(input_json_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
geom_data = json_data['GeometricData']
@@ -160,9 +160,9 @@ def write_main_building_stl_file(input_json_path, case_path):
convert_to_meters = 0.0254
# Convert from full-scale to model-scale
- B = convert_to_meters * geom_data['buildingWidth'] / scale
- D = convert_to_meters * geom_data['buildingDepth'] / scale
- H = convert_to_meters * geom_data['buildingHeight'] / scale
+ B = convert_to_meters * geom_data['buildingWidth'] / scale # noqa: N806
+ D = convert_to_meters * geom_data['buildingDepth'] / scale # noqa: N806
+ H = convert_to_meters * geom_data['buildingHeight'] / scale # noqa: N806
origin = np.array(geom_data['origin'])
wind_dxn = geom_data['windDirection']
@@ -179,9 +179,9 @@ def write_main_building_stl_file(input_json_path, case_path):
bldg.save(case_path + '/constant/geometry/building.stl', mode=fmt)
-def write_surrounding_buildings_stl_file(input_json_path, case_path):
+def write_surrounding_buildings_stl_file(input_json_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
geom_data = json_data['GeometricData']
@@ -205,19 +205,19 @@ def write_surrounding_buildings_stl_file(input_json_path, case_path):
convert_to_meters = 0.0254
# Convert from full-scale to model-scale
- B = convert_to_meters * geom_data['buildingWidth'] / scale
- D = convert_to_meters * geom_data['buildingDepth'] / scale
- Sb = convert_to_meters * sur_data['surroundingBuildingsWidth'] / scale
- Sd = convert_to_meters * sur_data['surroundingBuildingsDepth'] / scale
- Sh = convert_to_meters * sur_data['surroundingBuildingsHeight'] / scale
- Swx = convert_to_meters * sur_data['streetWidthX'] / scale
- Swy = convert_to_meters * sur_data['streetWidthY'] / scale
- Rb = convert_to_meters * sur_data['boundingRadius'] / scale
+ B = convert_to_meters * geom_data['buildingWidth'] / scale # noqa: N806
+ D = convert_to_meters * geom_data['buildingDepth'] / scale # noqa: N806
+ Sb = convert_to_meters * sur_data['surroundingBuildingsWidth'] / scale # noqa: N806
+ Sd = convert_to_meters * sur_data['surroundingBuildingsDepth'] / scale # noqa: N806
+ Sh = convert_to_meters * sur_data['surroundingBuildingsHeight'] / scale # noqa: N806
+ Swx = convert_to_meters * sur_data['streetWidthX'] / scale # noqa: N806
+ Swy = convert_to_meters * sur_data['streetWidthY'] / scale # noqa: N806
+ Rb = convert_to_meters * sur_data['boundingRadius'] / scale # noqa: N806
# Normalize 0 to 1
rand = sur_data['randomness'] / 100.0
- origin = np.array(geom_data['origin'])
+ origin = np.array(geom_data['origin']) # noqa: F841
wind_dxn = geom_data['windDirection']
wind_dxn_rad = np.deg2rad(wind_dxn)
@@ -232,9 +232,9 @@ def write_surrounding_buildings_stl_file(input_json_path, case_path):
surroundings.save(case_path + '/constant/geometry/surroundings.stl', mode=fmt)
-def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_block_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -243,12 +243,12 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
origin = np.array(geom_data['origin'])
- scale = geom_data['geometricScale']
+ scale = geom_data['geometricScale'] # noqa: F841
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_cells = mesh_data['xNumCells']
y_cells = mesh_data['yNumCells']
@@ -290,7 +290,7 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
z_max = z_min + Lz
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/blockMeshDictTemplate')
+ dict_file = open(template_dict_path + '/blockMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -335,18 +335,18 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
write_file_name = case_path + '/system/blockMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -366,16 +366,16 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
prism_layer_surface_name = mesh_data['prismLayerSurfaceName']
prism_layer_relative_size = 'on'
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
origin = np.array(geom_data['origin'])
num_cells_between_levels = mesh_data['numCellsBetweenLevels']
resolve_feature_angle = mesh_data['resolveFeatureAngle']
- num_processors = mesh_data['numProcessors']
+ num_processors = mesh_data['numProcessors'] # noqa: F841
refinement_boxes = mesh_data['refinementBoxes']
@@ -383,14 +383,14 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
y_min = -Ly / 2.0 - origin[1]
z_min = 0.0 - origin[2]
- x_max = x_min + Lx
+ x_max = x_min + Lx # noqa: F841
y_max = y_min + Ly
z_max = z_min + Lz
inside_point = [x_min + Lf / 2.0, (y_min + y_max) / 2.0, (z_min + z_max) / 2.0]
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate')
+ dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -546,18 +546,18 @@ def write_snappy_hex_mesh_dict(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/snappyHexMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_path):
+def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -566,7 +566,7 @@ def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_pat
surroundings_stl_name = domain_data['surroundingsSTLName']
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate')
+ dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -581,10 +581,10 @@ def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/system/surfaceFeaturesDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
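
Every write_*_file function repeats the remove-then-rewrite block behind the PTH110/PTH107/SIM115/PTH123 suppressions; a pathlib helper would collapse it. A sketch (render_dict is hypothetical; missing_ok needs Python 3.8+):

    from pathlib import Path


    def render_dict(template_path, output_path, edit):
        """Read a template dict, let edit() patch its lines in place, and write it out."""
        lines = Path(template_path).read_text().splitlines(keepends=True)
        edit(lines)  # caller mutates lines, as the write_*_file bodies do
        out = Path(output_path)
        out.unlink(missing_ok=True)  # replaces os.path.exists()/os.remove() (PTH110/PTH107)
        out.write_text(''.join(lines))  # context-managed write (SIM115/PTH123)
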
@@ -593,9 +593,9 @@ def write_surfaceFeaturesDict_file(input_json_path, template_dict_path, case_pat
def write_boundary_data_files(input_json_path, case_path):
"""This functions writes wind profile files in "constant/boundaryData/inlet"
if TInf options are used for the simulation.
- """
+ """ # noqa: D205, D401, D404
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -614,8 +614,8 @@ def write_boundary_data_files(input_json_path, case_path):
origin = np.array(geom_data['origin'])
- Ly = geom_data['domainWidth']
- Lf = geom_data['fetchLength']
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_min = -Lf - origin[0]
y_min = -Ly / 2.0 - origin[1]
@@ -641,25 +641,25 @@ def write_boundary_data_files(input_json_path, case_path):
foam.write_foam_field(wind_profiles[:, 8:17], bd_path + 'L')
-def write_U_file(input_json_path, template_dict_path, case_path):
+def write_U_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- inlet_BC_type = boundary_data['inletBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
+ inlet_BC_type = boundary_data['inletBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/UFileTemplate')
+ dict_file = open(template_dict_path + '/UFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -769,28 +769,28 @@ def write_U_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/U'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_p_file(input_json_path, template_dict_path, case_path):
+def write_p_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/pFileTemplate')
+ dict_file = open(template_dict_path + '/pFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -870,36 +870,36 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/p'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_nut_file(input_json_path, template_dict_path, case_path):
+def write_nut_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
- surrounding_BC_type = boundary_data['surroundingBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806
+ surrounding_BC_type = boundary_data['surroundingBoundaryCondition'] # noqa: N806
# wind_speed = wind_data['roofHeightWindSpeed']
# building_height = wind_data['buildingHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/nutFileTemplate')
+ dict_file = open(template_dict_path + '/nutFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1021,34 +1021,34 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/nut'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_epsilon_file(input_json_path, template_dict_path, case_path):
+def write_epsilon_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/epsilonFileTemplate')
+ dict_file = open(template_dict_path + '/epsilonFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1139,36 +1139,36 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/epsilon'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_k_file(input_json_path, template_dict_path, case_path):
+def write_k_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
- building_BC_type = boundary_data['buildingBoundaryCondition']
- surrounding_BC_type = boundary_data['surroundingBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
+ building_BC_type = boundary_data['buildingBoundaryCondition'] # noqa: N806
+ surrounding_BC_type = boundary_data['surroundingBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/kFileTemplate')
+ dict_file = open(template_dict_path + '/kFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1176,7 +1176,7 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# BC and initial condition (you may need to scale to model scale)
# k0 = 1.3 #not in model scale
- I = 0.1
+ I = 0.1 # noqa: N806, E741
k0 = 1.5 * (I * wind_speed) ** 2
# Internal Field #########################
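
The initial k value follows the isotropic-turbulence relation k_0 = (3/2) (I * U_ref)^2, which is exactly the k0 = 1.5 * (I * wind_speed) ** 2 line above.
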
@@ -1293,23 +1293,23 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/k'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_controlDict_file(input_json_path, template_dict_path, case_path):
+def write_controlDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
ns_data = json_data['numericalSetup']
- rm_data = json_data['resultMonitoring']
+ rm_data = json_data['resultMonitoring'] # noqa: F841
solver_type = ns_data['solverType']
duration = ns_data['duration']
@@ -1332,7 +1332,7 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
purge_write = 3
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/controlDictTemplate')
+ dict_file = open(template_dict_path + '/controlDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1412,18 +1412,18 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/controlDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSolution_file(input_json_path, template_dict_path, case_path):
+def write_fvSolution_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1436,7 +1436,7 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
num_outer_correctors = ns_data['numOuterCorrectors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/fvSolutionTemplate')
+ dict_file = open(template_dict_path + '/fvSolutionTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1471,18 +1471,18 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSolution'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_base_forces_file(input_json_path, template_dict_path, case_path):
+def write_base_forces_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
air_density = 1.0
@@ -1490,14 +1490,14 @@ def write_base_forces_file(input_json_path, template_dict_path, case_path):
# Returns JSON object as a dictionary
rm_data = json_data['resultMonitoring']
- num_stories = rm_data['numStories']
- floor_height = rm_data['floorHeight']
+ num_stories = rm_data['numStories'] # noqa: F841
+ floor_height = rm_data['floorHeight'] # noqa: F841
center_of_rotation = rm_data['centerOfRotation']
base_load_write_interval = rm_data['baseLoadWriteInterval']
- monitor_base_load = rm_data['monitorBaseLoad']
+ monitor_base_load = rm_data['monitorBaseLoad'] # noqa: F841
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/baseForcesTemplate')
+ dict_file = open(template_dict_path + '/baseForcesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1523,18 +1523,18 @@ def write_base_forces_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/baseForces'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_story_forces_file(input_json_path, template_dict_path, case_path):
+def write_story_forces_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
air_density = 1.0
@@ -1543,13 +1543,13 @@ def write_story_forces_file(input_json_path, template_dict_path, case_path):
rm_data = json_data['resultMonitoring']
num_stories = rm_data['numStories']
- floor_height = rm_data['floorHeight']
+ floor_height = rm_data['floorHeight'] # noqa: F841
center_of_rotation = rm_data['centerOfRotation']
story_load_write_interval = rm_data['storyLoadWriteInterval']
- monitor_base_load = rm_data['monitorBaseLoad']
+ monitor_base_load = rm_data['monitorBaseLoad'] # noqa: F841
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/storyForcesTemplate')
+ dict_file = open(template_dict_path + '/storyForcesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1583,18 +1583,18 @@ def write_story_forces_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/storyForces'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
+def write_pressure_probes_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1604,7 +1604,7 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1629,18 +1629,18 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/pressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
+def write_wind_profiles_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1654,7 +1654,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
write_interval = rm_data['profileWriteInterval']
start_time = rm_data['profileStartTime']
- if rm_data['monitorWindProfile'] == False:
+ if rm_data['monitorWindProfile'] == False: # noqa: E712
return
if len(wind_profiles) == 0:
@@ -1663,7 +1663,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for prof in wind_profiles:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1730,18 +1730,18 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
+def write_vtk_plane_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1753,7 +1753,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
vtk_planes = rm_data['vtkPlanes']
write_interval = rm_data['vtkWriteInterval']
- if rm_data['monitorVTKPlane'] == False:
+ if rm_data['monitorVTKPlane'] == False: # noqa: E712
return
if len(vtk_planes) == 0:
@@ -1762,7 +1762,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for pln in vtk_planes:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/vtkPlaneTemplate')
+ dict_file = open(template_dict_path + '/vtkPlaneTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1831,30 +1831,30 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_momentumTransport_file(input_json_path, template_dict_path, case_path):
+def write_momentumTransport_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
turb_data = json_data['turbulenceModeling']
simulation_type = turb_data['simulationType']
- RANS_type = turb_data['RANSModelType']
- LES_type = turb_data['LESModelType']
- DES_type = turb_data['DESModelType']
+ RANS_type = turb_data['RANSModelType'] # noqa: N806
+ LES_type = turb_data['LESModelType'] # noqa: N806
+ DES_type = turb_data['DESModelType'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/momentumTransportTemplate')
+ dict_file = open(template_dict_path + '/momentumTransportTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1886,18 +1886,18 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
# Write edited dict to file
write_file_name = case_path + '/constant/momentumTransport'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_physicalProperties_file(input_json_path, template_dict_path, case_path):
+def write_physicalProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1906,7 +1906,7 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/physicalPropertiesTemplate')
+ dict_file = open(template_dict_path + '/physicalPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1918,18 +1918,18 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
# Write edited dict to file
write_file_name = case_path + '/constant/physicalProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_transportProperties_file(input_json_path, template_dict_path, case_path):
+def write_transportProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1938,7 +1938,7 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/transportPropertiesTemplate')
+ dict_file = open(template_dict_path + '/transportPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1950,18 +1950,18 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/constant/transportProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
+def write_fvSchemes_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1970,7 +1970,7 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
simulation_type = turb_data['simulationType']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}')
+ dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1978,18 +1978,18 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSchemes'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
+def write_decomposeParDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1998,7 +1998,7 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
num_processors = ns_data['numProcessors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/decomposeParDictTemplate')
+ dict_file = open(template_dict_path + '/decomposeParDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -2018,18 +2018,18 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/decomposeParDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
+def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
fmax = 200.0
@@ -2045,7 +2045,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
duration = duration * 1.010
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/DFSRTurbDictTemplate')
+ dict_file = open(template_dict_path + '/DFSRTurbDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -2073,10 +2073,10 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/constant/DFSRTurbDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -2091,7 +2091,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
case_path = sys.argv[3]
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
diff --git a/modules/createEVENT/SurroundedBuildingCFD/setup_case_multiple_stl_files.py b/modules/createEVENT/SurroundedBuildingCFD/setup_case_multiple_stl_files.py
index 6d45f0042..c23f81b4f 100644
--- a/modules/createEVENT/SurroundedBuildingCFD/setup_case_multiple_stl_files.py
+++ b/modules/createEVENT/SurroundedBuildingCFD/setup_case_multiple_stl_files.py
@@ -1,7 +1,7 @@
"""This script writes BC and initial condition, and setups the OpenFoam case
directory.
-"""
+""" # noqa: INP001, D205, D404
import json
import os
@@ -12,7 +12,7 @@
from stl import mesh
-def create_building_geometry(width, depth, height, center):
+def create_building_geometry(width, depth, height, center): # noqa: D103
epsilon = 0.01 * min(width, depth, height)
# Define the 8 vertices of the building
@@ -58,7 +58,7 @@ def create_building_geometry(width, depth, height, center):
return bldg
-def create_surroundings_geometry(
+def create_surroundings_geometry( # noqa: D103
main_bldg_width,
main_bldg_depth,
sur_bldg_width,
@@ -106,7 +106,7 @@ def create_surroundings_geometry(
center_y = -y_max / 2.0 + iy * street_width_y + plan_y * (iy + 0.5)
# bldg_R = np.sqrt((abs(center_x) + sur_bldg_depth)**2.0 + (abs(center_y) + sur_bldg_width)**2.0)
- bldg_R = np.sqrt(center_x**2.0 + center_y**2.0)
+ bldg_R = np.sqrt(center_x**2.0 + center_y**2.0) # noqa: N806
# Add the building if it's within bounding radius
if bldg_R < bound_radius:
@@ -186,9 +186,9 @@ def create_surroundings_geometry(
# return table
-def write_main_building_stl_file(input_json_path, case_path):
+def write_main_building_stl_file(input_json_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
geom_data = json_data['GeometricData']
@@ -211,9 +211,9 @@ def write_main_building_stl_file(input_json_path, case_path):
convert_to_meters = 0.0254
# Convert from full-scale to model-scale
- B = convert_to_meters * geom_data['buildingWidth'] / scale
- D = convert_to_meters * geom_data['buildingDepth'] / scale
- H = convert_to_meters * geom_data['buildingHeight'] / scale
+ B = convert_to_meters * geom_data['buildingWidth'] / scale # noqa: N806
+ D = convert_to_meters * geom_data['buildingDepth'] / scale # noqa: N806
+ H = convert_to_meters * geom_data['buildingHeight'] / scale # noqa: N806
origin = np.array(geom_data['origin'])
wind_dxn = geom_data['windDirection']
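N806 fires because B, D, and H are uppercase locals. Where the single-letter engineering notation is not essential, lowercase names would retire the suppressions; these replacements are illustrative, not the project's convention:

# Illustrative snake_case equivalents that would satisfy N806:
bldg_width = convert_to_meters * geom_data['buildingWidth'] / scale
bldg_depth = convert_to_meters * geom_data['buildingDepth'] / scale
bldg_height = convert_to_meters * geom_data['buildingHeight'] / scale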
@@ -230,9 +230,9 @@ def write_main_building_stl_file(input_json_path, case_path):
bldg.save(case_path + '/constant/geometry/building.stl', mode=fmt)
-def write_surrounding_buildings_stl_file(input_json_path, case_path):
+def write_surrounding_buildings_stl_file(input_json_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
geom_data = json_data['GeometricData']
@@ -256,19 +256,19 @@ def write_surrounding_buildings_stl_file(input_json_path, case_path):
convert_to_meters = 0.0254
# Convert from full-scale to model-scale
- B = convert_to_meters * geom_data['buildingWidth'] / scale
- D = convert_to_meters * geom_data['buildingDepth'] / scale
- Sb = convert_to_meters * sur_data['surroundingBuildingsWidth'] / scale
- Sd = convert_to_meters * sur_data['surroundingBuildingsDepth'] / scale
- Sh = convert_to_meters * sur_data['surroundingBuildingsHeight'] / scale
- Swx = convert_to_meters * sur_data['streetWidthX'] / scale
- Swy = convert_to_meters * sur_data['streetWidthY'] / scale
- Rb = convert_to_meters * sur_data['boundingRadius'] / scale
+ B = convert_to_meters * geom_data['buildingWidth'] / scale # noqa: N806
+ D = convert_to_meters * geom_data['buildingDepth'] / scale # noqa: N806
+ Sb = convert_to_meters * sur_data['surroundingBuildingsWidth'] / scale # noqa: N806
+ Sd = convert_to_meters * sur_data['surroundingBuildingsDepth'] / scale # noqa: N806
+ Sh = convert_to_meters * sur_data['surroundingBuildingsHeight'] / scale # noqa: N806
+ Swx = convert_to_meters * sur_data['streetWidthX'] / scale # noqa: N806
+ Swy = convert_to_meters * sur_data['streetWidthY'] / scale # noqa: N806
+ Rb = convert_to_meters * sur_data['boundingRadius'] / scale # noqa: N806
# Normalize 0 to 1
rand = sur_data['randomness'] / 100.0
- origin = np.array(geom_data['origin'])
+ origin = np.array(geom_data['origin']) # noqa: F841
wind_dxn = geom_data['windDirection']
wind_dxn_rad = np.deg2rad(wind_dxn)
@@ -291,14 +291,14 @@ def write_surrounding_buildings_stl_file(input_json_path, case_path):
case_path + f'/constant/geometry/surr_bldg{bldg_count}.stl',
mode=fmt,
)
- bldg_count += 1
+ bldg_count += 1 # noqa: SIM113
return len(surroundings)
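SIM113 suggests enumerate() in place of the hand-maintained bldg_count. A sketch assuming surroundings is the iterable whose meshes are being saved; the start value must match the counter's initial value, which lies outside this hunk:

# enumerate() replaces the manual `bldg_count += 1` (SIM113):
for bldg_count, sur_bldg in enumerate(surroundings, start=1):
    sur_bldg.save(
        case_path + f'/constant/geometry/surr_bldg{bldg_count}.stl',
        mode=fmt,
    )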
-def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
+def write_block_mesh_dict(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -307,12 +307,12 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
boundary_data = json_data['boundaryConditions']
origin = np.array(geom_data['origin'])
- scale = geom_data['geometricScale']
+ scale = geom_data['geometricScale'] # noqa: F841
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_cells = mesh_data['xNumCells']
y_cells = mesh_data['yNumCells']
@@ -354,7 +354,7 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
z_max = z_min + Lz
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/blockMeshDictTemplate')
+ dict_file = open(template_dict_path + '/blockMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -399,30 +399,30 @@ def write_block_mesh_dict(input_json_path, template_dict_path, case_path):
write_file_name = case_path + '/system/blockMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_snappy_hex_mesh_dict(
+def write_snappy_hex_mesh_dict( # noqa: C901, D103
input_json_path,
template_dict_path,
case_path,
n_surr_bldgs,
):
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
mesh_data = json_data['snappyHexMeshParameters']
add_surface_refinement = mesh_data['addSurfaceRefinements']
building_stl_name = mesh_data['buildingSTLName']
- surrounding_stl_name = mesh_data['surroundingsSTLName']
+ surrounding_stl_name = mesh_data['surroundingsSTLName'] # noqa: F841
add_edge_refinement = mesh_data['addEdgeRefinements']
surface_refinements = mesh_data['surfaceRefinements']
edge_refinements = mesh_data['edgeRefinements']
@@ -435,16 +435,16 @@ def write_snappy_hex_mesh_dict(
prism_layer_surface_name = mesh_data['prismLayerSurfaceName']
prism_layer_relative_size = 'on'
- Lx = geom_data['domainLength']
- Ly = geom_data['domainWidth']
- Lz = geom_data['domainHeight']
- Lf = geom_data['fetchLength']
+ Lx = geom_data['domainLength'] # noqa: N806
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lz = geom_data['domainHeight'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
origin = np.array(geom_data['origin'])
num_cells_between_levels = mesh_data['numCellsBetweenLevels']
resolve_feature_angle = mesh_data['resolveFeatureAngle']
- num_processors = mesh_data['numProcessors']
+ num_processors = mesh_data['numProcessors'] # noqa: F841
refinement_boxes = mesh_data['refinementBoxes']
@@ -452,14 +452,14 @@ def write_snappy_hex_mesh_dict(
y_min = -Ly / 2.0 - origin[1]
z_min = 0.0 - origin[2]
- x_max = x_min + Lx
+ x_max = x_min + Lx # noqa: F841
y_max = y_min + Ly
z_max = z_min + Lz
inside_point = [x_min + Lf / 2.0, (y_min + y_max) / 2.0, (z_min + z_max) / 2.0]
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate')
+ dict_file = open(template_dict_path + '/snappyHexMeshDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -639,32 +639,32 @@ def write_snappy_hex_mesh_dict(
# Write edited dict to file
write_file_name = case_path + '/system/snappyHexMeshDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_surfaceFeaturesDict_file(
+def write_surfaceFeaturesDict_file( # noqa: N802, D103
input_json_path,
template_dict_path,
case_path,
n_surr_bldgs,
):
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
domain_data = json_data['snappyHexMeshParameters']
building_stl_name = domain_data['buildingSTLName']
- surroundings_stl_name = domain_data['surroundingsSTLName']
+ surroundings_stl_name = domain_data['surroundingsSTLName'] # noqa: F841
# Open the template blockMeshDict (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate')
+ dict_file = open(template_dict_path + '/surfaceFeaturesDictTemplate') # noqa: SIM115, PTH123
# Export to OpenFOAM probe format
dict_lines = dict_file.readlines()
@@ -685,10 +685,10 @@ def write_surfaceFeaturesDict_file(
# Write edited dict to file
write_file_name = case_path + '/system/surfaceFeaturesDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -697,9 +697,9 @@ def write_surfaceFeaturesDict_file(
def write_boundary_data_files(input_json_path, case_path):
"""This functions writes wind profile files in "constant/boundaryData/inlet"
if TInf options are used for the simulation.
- """
+ """ # noqa: D205, D401, D404
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -716,8 +716,8 @@ def write_boundary_data_files(input_json_path, case_path):
origin = np.array(geom_data['origin'])
- Ly = geom_data['domainWidth']
- Lf = geom_data['fetchLength']
+ Ly = geom_data['domainWidth'] # noqa: N806
+ Lf = geom_data['fetchLength'] # noqa: N806
x_min = -Lf - origin[0]
y_min = -Ly / 2.0 - origin[1]
@@ -743,25 +743,25 @@ def write_boundary_data_files(input_json_path, case_path):
foam.write_foam_field(wind_profiles[:, 8:17], bd_path + 'L')
-def write_U_file(input_json_path, template_dict_path, case_path):
+def write_U_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- inlet_BC_type = boundary_data['inletBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
+ inlet_BC_type = boundary_data['inletBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/UFileTemplate')
+ dict_file = open(template_dict_path + '/UFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -857,28 +857,28 @@ def write_U_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/U'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_p_file(input_json_path, template_dict_path, case_path):
+def write_p_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/pFileTemplate')
+ dict_file = open(template_dict_path + '/pFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -942,34 +942,34 @@ def write_p_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/p'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_nut_file(input_json_path, template_dict_path, case_path):
+def write_nut_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
# wind_speed = wind_data['roofHeightWindSpeed']
# building_height = wind_data['buildingHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/nutFileTemplate')
+ dict_file = open(template_dict_path + '/nutFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1047,34 +1047,34 @@ def write_nut_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/nut'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_epsilon_file(input_json_path, template_dict_path, case_path):
+def write_epsilon_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/epsilonFileTemplate')
+ dict_file = open(template_dict_path + '/epsilonFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1165,34 +1165,34 @@ def write_epsilon_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/epsilon'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_k_file(input_json_path, template_dict_path, case_path):
+def write_k_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
boundary_data = json_data['boundaryConditions']
wind_data = json_data['windCharacteristics']
- sides_BC_type = boundary_data['sidesBoundaryCondition']
- top_BC_type = boundary_data['topBoundaryCondition']
- ground_BC_type = boundary_data['groundBoundaryCondition']
+ sides_BC_type = boundary_data['sidesBoundaryCondition'] # noqa: N806
+ top_BC_type = boundary_data['topBoundaryCondition'] # noqa: N806
+ ground_BC_type = boundary_data['groundBoundaryCondition'] # noqa: N806
wind_speed = wind_data['referenceWindSpeed']
building_height = wind_data['referenceHeight']
roughness_length = wind_data['aerodynamicRoughnessLength']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/kFileTemplate')
+ dict_file = open(template_dict_path + '/kFileTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1200,7 +1200,7 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# BC and initial condition (you may need to scale to model scale)
# k0 = 1.3 #not in model scale
- I = 0.1
+ I = 0.1 # noqa: N806, E741
k0 = 1.5 * (I * wind_speed) ** 2
# Internal Field #########################
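The E741 suppression is for the ambiguous local name I (turbulence intensity). The line implements the standard estimate of turbulence kinetic energy from intensity and mean speed, k = 1.5 (I U)^2; a sketch with an unambiguous name that would satisfy both E741 and N806 (turb_intensity is an illustrative rename):

turb_intensity = 0.1  # assumed 10% turbulence intensity, as in the original
# Standard TKE estimate: k = 1.5 * (I * U)**2
k0 = 1.5 * (turb_intensity * wind_speed) ** 2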
@@ -1279,23 +1279,23 @@ def write_k_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/0/k'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_controlDict_file(input_json_path, template_dict_path, case_path):
+def write_controlDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
ns_data = json_data['numericalSetup']
- rm_data = json_data['resultMonitoring']
+ rm_data = json_data['resultMonitoring'] # noqa: F841
solver_type = ns_data['solverType']
duration = ns_data['duration']
@@ -1315,7 +1315,7 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
purge_write = 3
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/controlDictTemplate')
+ dict_file = open(template_dict_path + '/controlDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1388,18 +1388,18 @@ def write_controlDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/controlDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSolution_file(input_json_path, template_dict_path, case_path):
+def write_fvSolution_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1412,7 +1412,7 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
num_outer_correctors = ns_data['numOuterCorrectors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/fvSolutionTemplate')
+ dict_file = open(template_dict_path + '/fvSolutionTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1447,18 +1447,18 @@ def write_fvSolution_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSolution'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
+def write_pressure_probes_file(input_json_path, template_dict_path, case_path): # noqa: D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1468,7 +1468,7 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
pressure_write_interval = rm_data['pressureWriteInterval']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1493,18 +1493,18 @@ def write_pressure_probes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/pressureSamplingPoints'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
+def write_wind_profiles_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1518,7 +1518,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
write_interval = rm_data['profileWriteInterval']
start_time = rm_data['profileStartTime']
- if rm_data['monitorWindProfile'] == False:
+ if rm_data['monitorWindProfile'] == False: # noqa: E712
return
if len(wind_profiles) == 0:
@@ -1527,7 +1527,7 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for prof in wind_profiles:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/probeTemplate')
+ dict_file = open(template_dict_path + '/probeTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1594,18 +1594,18 @@ def write_wind_profiles_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
+def write_vtk_plane_file(input_json_path, template_dict_path, case_path): # noqa: C901, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1617,7 +1617,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
vtk_planes = rm_data['vtkPlanes']
write_interval = rm_data['vtkWriteInterval']
- if rm_data['monitorVTKPlane'] == False:
+ if rm_data['monitorVTKPlane'] == False: # noqa: E712
return
if len(vtk_planes) == 0:
@@ -1626,7 +1626,7 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write dict files for wind profiles
for pln in vtk_planes:
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/vtkPlaneTemplate')
+ dict_file = open(template_dict_path + '/vtkPlaneTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1695,30 +1695,30 @@ def write_vtk_plane_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/' + name
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_momentumTransport_file(input_json_path, template_dict_path, case_path):
+def write_momentumTransport_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
turb_data = json_data['turbulenceModeling']
simulation_type = turb_data['simulationType']
- RANS_type = turb_data['RANSModelType']
- LES_type = turb_data['LESModelType']
- DES_type = turb_data['DESModelType']
+ RANS_type = turb_data['RANSModelType'] # noqa: N806
+ LES_type = turb_data['LESModelType'] # noqa: N806
+ DES_type = turb_data['DESModelType'] # noqa: N806
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/momentumTransportTemplate')
+ dict_file = open(template_dict_path + '/momentumTransportTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1750,18 +1750,18 @@ def write_momentumTransport_file(input_json_path, template_dict_path, case_path)
# Write edited dict to file
write_file_name = case_path + '/constant/momentumTransport'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_physicalProperties_file(input_json_path, template_dict_path, case_path):
+def write_physicalProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1770,7 +1770,7 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/physicalPropertiesTemplate')
+ dict_file = open(template_dict_path + '/physicalPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1782,18 +1782,18 @@ def write_physicalProperties_file(input_json_path, template_dict_path, case_path
# Write edited dict to file
write_file_name = case_path + '/constant/physicalProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_transportProperties_file(input_json_path, template_dict_path, case_path):
+def write_transportProperties_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1802,7 +1802,7 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
kinematic_viscosity = wc_data['kinematicViscosity']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/transportPropertiesTemplate')
+ dict_file = open(template_dict_path + '/transportPropertiesTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1814,18 +1814,18 @@ def write_transportProperties_file(input_json_path, template_dict_path, case_pat
# Write edited dict to file
write_file_name = case_path + '/constant/transportProperties'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
+def write_fvSchemes_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1834,7 +1834,7 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
simulation_type = turb_data['simulationType']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}')
+ dict_file = open(template_dict_path + f'/fvSchemesTemplate{simulation_type}') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1842,18 +1842,18 @@ def write_fvSchemes_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/fvSchemes'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
+def write_decomposeParDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
@@ -1862,7 +1862,7 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
num_processors = ns_data['numProcessors']
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/decomposeParDictTemplate')
+ dict_file = open(template_dict_path + '/decomposeParDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1882,18 +1882,18 @@ def write_decomposeParDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/system/decomposeParDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
-def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
+def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path): # noqa: N802, D103
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
fmax = 200.0
@@ -1909,7 +1909,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
duration = duration * 1.010
# Open the template file (OpenFOAM file) for manipulation
- dict_file = open(template_dict_path + '/DFSRTurbDictTemplate')
+ dict_file = open(template_dict_path + '/DFSRTurbDictTemplate') # noqa: SIM115, PTH123
dict_lines = dict_file.readlines()
dict_file.close()
@@ -1937,10 +1937,10 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
# Write edited dict to file
write_file_name = case_path + '/constant/DFSRTurbDict'
- if os.path.exists(write_file_name):
- os.remove(write_file_name)
+ if os.path.exists(write_file_name): # noqa: PTH110
+ os.remove(write_file_name) # noqa: PTH107
- output_file = open(write_file_name, 'w+')
+ output_file = open(write_file_name, 'w+') # noqa: SIM115, PTH123
for line in dict_lines:
output_file.write(line)
output_file.close()
@@ -1955,7 +1955,7 @@ def write_DFSRTurbDict_file(input_json_path, template_dict_path, case_path):
case_path = sys.argv[3]
# Read JSON data
- with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file:
+ with open(input_json_path + '/SurroundedBuildingCFD.json') as json_file: # noqa: PTH123
json_data = json.load(json_file)
# Returns JSON object as a dictionary
diff --git a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
index 42d2f69a3..28342461c 100644
--- a/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
+++ b/modules/createEVENT/coupledDigitalTwin/CoupledDigitalTwin.py
@@ -1,25 +1,25 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
-class FloorForces:
+class FloorForces: # noqa: D101
def __init__(self):
self.X = [0]
self.Y = [0]
self.Z = [0]
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
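For reference, the mapping is 'X' -> 1, 'Y' -> 2, 'Z' -> 3, so for example:

directionToDof('X')  # returns 1
directionToDof('Y')  # returns 2; any other label raises KeyError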
-def addFloorForceToEvent(patternsArray, force, direction, floor):
- """Add force (one component) time series and pattern in the event file"""
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803
+ """Add force (one component) time series and pattern in the event file""" # noqa: D400
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
'timeSeries': seriesName,
@@ -31,10 +31,10 @@ def addFloorForceToEvent(patternsArray, force, direction, floor):
patternsArray.append(pattern)
-def writeEVENT(forces, eventFilePath):
- """This method writes the EVENT.json file"""
- patternsArray = []
- windEventJson = {
+def writeEVENT(forces, eventFilePath): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
+ patternsArray = [] # noqa: N806
+ windEventJson = { # noqa: N806
'type': 'Hydro',
'subtype': 'CoupledDigitalTwin',
'pattern': patternsArray,
@@ -44,20 +44,20 @@ def writeEVENT(forces, eventFilePath):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [windEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [windEventJson]} # noqa: N806
# Adding floor forces
- for floorForces in forces:
+ for floorForces in forces: # noqa: N806
floor = forces.index(floorForces) + 1
addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor)
addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor)
- with open(eventFilePath, 'w', encoding='utf-8') as eventsFile:
+ with open(eventFilePath, 'w', encoding='utf-8') as eventsFile: # noqa: PTH123, N806
json.dump(eventDict, eventsFile)
-def GetFloorsCount(BIMFilePath):
- with open(BIMFilePath, encoding='utf-8') as BIMFile:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ with open(BIMFilePath, encoding='utf-8') as BIMFile: # noqa: PTH123, N806
bim = json.load(BIMFile)
return int(bim['GeneralInformation']['stories'])
@@ -78,11 +78,11 @@ def GetFloorsCount(BIMFilePath):
# parsing arguments
arguments, unknowns = parser.parse_known_args()
- if arguments.getRV == True:
+ if arguments.getRV == True: # noqa: E712
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
forces = []
- for i in range(floorsCount):
- forces.append(FloorForces())
+ for i in range(floorsCount): # noqa: B007
+ forces.append(FloorForces()) # noqa: PERF401
# write the event file
writeEVENT(forces, arguments.filenameEVENT)
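B007 flags the unused loop variable and PERF401 the append-in-loop; both disappear with a comprehension. A sketch under the same names:

# One FloorForces record per storey (retires both B007 and PERF401):
forces = [FloorForces() for _ in range(floorsCount)]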
diff --git a/modules/createEVENT/experimentalWindForces/convertWindMat.py b/modules/createEVENT/experimentalWindForces/convertWindMat.py
index 1cacbbf7a..81c747429 100644
--- a/modules/createEVENT/experimentalWindForces/convertWindMat.py
+++ b/modules/createEVENT/experimentalWindForces/convertWindMat.py
@@ -1,4 +1,4 @@
-# python code to open the .mat file
+# python code to open the .mat file # noqa: INP001, D100
# and put data into a SimCenter JSON file
import json
@@ -8,9 +8,9 @@
import scipy.io as sio
-def parseWindMatFile(matFileIn, windFileOutName):
- dataDir = os.getcwd()
- scriptDir = os.path.dirname(os.path.realpath(__file__))
+def parseWindMatFile(matFileIn, windFileOutName): # noqa: N802, N803, D103
+ dataDir = os.getcwd() # noqa: PTH109, N806, F841
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806, F841
mat_contents = sio.loadmat(matFileIn)
@@ -18,11 +18,11 @@ def parseWindMatFile(matFileIn, windFileOutName):
breadth = float(mat_contents['B'][0])
height = float(mat_contents['H'][0])
fs = float(mat_contents['fs'][0])
- vRef = float(mat_contents['Vref'][0])
+ vRef = float(mat_contents['Vref'][0]) # noqa: N806
if 's_target' in mat_contents:
- case = 'spectra'
- comp_CFmean = np.squeeze(np.array(mat_contents['comp_CFmean']))
+ case = 'spectra' # noqa: F841
+ comp_CFmean = np.squeeze(np.array(mat_contents['comp_CFmean'])) # noqa: N806
norm_all = np.squeeze(np.array(mat_contents['norm_all']))
f_target = np.squeeze(np.array(mat_contents['f_target']))
s_target = np.squeeze(np.array(mat_contents['s_target']))
@@ -41,12 +41,12 @@ def parseWindMatFile(matFileIn, windFileOutName):
)
elif 'Fx' in mat_contents:
- Fx = np.squeeze(np.array(mat_contents['Fx']))
- Fy = np.squeeze(np.array(mat_contents['Fy']))
- Tz = np.squeeze(np.array(mat_contents['Tz']))
+ Fx = np.squeeze(np.array(mat_contents['Fx'])) # noqa: N806
+ Fy = np.squeeze(np.array(mat_contents['Fy'])) # noqa: N806
+ Tz = np.squeeze(np.array(mat_contents['Tz'])) # noqa: N806
t = np.squeeze(np.array(mat_contents['t']))
- myJson = {}
+ myJson = {} # noqa: N806
myJson['D'] = depth
myJson['H'] = height
myJson['B'] = breadth
@@ -57,7 +57,7 @@ def parseWindMatFile(matFileIn, windFileOutName):
myJson['Fy'] = np.array(Fy).tolist()
myJson['Tz'] = np.array(Tz).tolist()
myJson['t'] = np.array(t).tolist()
- with open(windFileOutName, 'w') as f:
+ with open(windFileOutName, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
# file = open(windFileOutName,"w")
@@ -127,33 +127,33 @@ def parseWindMatFile(matFileIn, windFileOutName):
# Check valid JSON file,
validate = True
if validate:
- with open(windFileOutName) as infile:
+ with open(windFileOutName) as infile: # noqa: PTH123
json_data = infile.read()
# Try to parse the JSON data
try:
- json_object = json.loads(json_data)
- print('JSON file is valid')
+ json_object = json.loads(json_data) # noqa: F841
+ print('JSON file is valid') # noqa: T201
except json.decoder.JSONDecodeError:
- print('JSON file is not valid')
+ print('JSON file is not valid') # noqa: T201
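The F841 here is the parsed object being bound and then discarded. Validation only needs the parse to succeed, so the result can be dropped entirely; a hedged sketch of the same check:

try:
    json.loads(json_data)      # parse only to validate; discard the result
except json.JSONDecodeError:   # public alias of json.decoder.JSONDecodeError
    print('JSON file is not valid')
else:
    print('JSON file is valid')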
-def createSpectraJson(
- windFileOutName,
+def createSpectraJson( # noqa: N802, D103
+ windFileOutName, # noqa: N803
breadth,
depth,
height,
fs,
- vRef,
+ vRef, # noqa: N803
f_target,
s_target,
- comp_CFmean,
+ comp_CFmean, # noqa: N803
norm_all,
):
- ncomp = comp_CFmean.shape[0]
- nf = f_target.shape[0]
+ ncomp = comp_CFmean.shape[0] # noqa: F841
+ nf = f_target.shape[0] # noqa: F841
- myJson = {}
+ myJson = {} # noqa: N806
myJson['D'] = depth
myJson['H'] = height
myJson['B'] = breadth
@@ -166,21 +166,21 @@ def createSpectraJson(
myJson['s_target_real'] = np.real(s_target).tolist()
myJson['s_target_imag'] = np.imag(s_target).tolist()
- with open(windFileOutName, 'w') as f:
+ with open(windFileOutName, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
# Check valid JSON file
validate = True
if validate:
- with open(windFileOutName) as infile:
+ with open(windFileOutName) as infile: # noqa: PTH123
json_data = infile.read()
# Try to parse the JSON data
try:
- json_object = json.loads(json_data)
- print('JSON file is valid')
+ json_object = json.loads(json_data) # noqa: F841
+ print('JSON file is valid') # noqa: T201
except json.decoder.JSONDecodeError:
- print('JSON file is not valid')
+ print('JSON file is not valid') # noqa: T201
# file = open(windFileOutName,"w")
# file.write("{")
@@ -251,21 +251,21 @@ def createSpectraJson(
# file.close()
-def createPODJson(
+def createPODJson( # noqa: N802, D103
filename,
- V,
- D1,
- SpeN,
+ V, # noqa: N803
+ D1, # noqa: N803
+ SpeN, # noqa: N803
f_target,
norm_all,
- D,
- H,
- B,
+ D, # noqa: N803
+ H, # noqa: N803
+ B, # noqa: N803
fs,
- vRef,
- comp_CFmean,
+ vRef, # noqa: N803
+ comp_CFmean, # noqa: N803
):
- myJson = {}
+ myJson = {} # noqa: N806
myJson['V_imag'] = np.imag(V).tolist()
myJson['V_real'] = np.real(V).tolist()
myJson['D1'] = D1.tolist()
@@ -279,5 +279,5 @@ def createPODJson(
myJson['fs'] = fs
myJson['Vref'] = vRef
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
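The PTH123 suppressions above opt to keep the builtin open() rather than refactor to pathlib, which is what the rule suggests. A minimal sketch of the pathlib-based form of the write-then-validate pattern in parseWindMatFile, using a hypothetical helper name (illustrative only, not part of the patch):

import json
from pathlib import Path

def write_json(out_name, payload):
    # Path.open() is the form Ruff's PTH123 rule recommends over builtin open()
    out_path = Path(out_name)
    with out_path.open('w', encoding='utf-8') as f:
        json.dump(payload, f)
    # round-trip check, mirroring the validate block in the patch
    try:
        json.loads(out_path.read_text(encoding='utf-8'))
        print('JSON file is valid')
    except json.decoder.JSONDecodeError:
        print('JSON file is not valid')

# usage (hypothetical): write_json(windFileOutName, myJson)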
diff --git a/modules/createEVENT/experimentalWindForces/experimentalWindForces.py b/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
index bb9d2102c..55a93d117 100644
--- a/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
+++ b/modules/createEVENT/experimentalWindForces/experimentalWindForces.py
@@ -1,25 +1,25 @@
-import json
+import json # noqa: INP001, D100
import os
import time
try:
- moduleName = 'numpy'
+ moduleName = 'numpy' # noqa: N816
import numpy as np
- moduleName = 'scipy'
+ moduleName = 'scipy' # noqa: N816
from scipy.interpolate import interp1d
from scipy.signal import csd, windows
error_tag = False # global variable
-except:
+except: # noqa: E722
error_tag = True
-from convertWindMat import *
+from convertWindMat import * # noqa: F403
-def main(aimName, evtName, getRV):
+def main(aimName, evtName, getRV): # noqa: C901, N803, D103, PLR0915
# THIS IS PERFORMED ONLY ONCE
# with open(aimName, 'r', encoding='utf-8') as f:
- with open(aimName) as f:
+ with open(aimName) as f: # noqa: PTH123
aim_data = json.load(f)
evt_data = aim_data['Events'][0]
@@ -27,8 +27,8 @@ def main(aimName, evtName, getRV):
filename = evt_data['filename']
# from UI
- V_H = evt_data['windSpeed'] # wind speed at full scale (vel)
- T_full = evt_data[
+ V_H = evt_data['windSpeed'] # wind speed at full scale (vel) # noqa: N806
+ T_full = evt_data[ # noqa: N806
'fullScaleDuration'
] # Duration of wind load at full scale (time)
perc_mod = evt_data[
@@ -43,16 +43,16 @@ def main(aimName, evtName, getRV):
if filename.endswith('.mat'):
mat_filenmae = filename
- base = os.path.splitext(mat_filenmae)[0]
+ base = os.path.splitext(mat_filenmae)[0] # noqa: PTH122
json_filename = base + '.json'
if getRV:
- parseWindMatFile(mat_filenmae, json_filename)
- os.remove(mat_filenmae)
+ parseWindMatFile(mat_filenmae, json_filename) # noqa: F405
+ os.remove(mat_filenmae) # noqa: PTH107
filename = json_filename
- with open(filename, encoding='utf-8') as jsonFile:
+ with open(filename, encoding='utf-8') as jsonFile: # noqa: PTH123, N806
data = json.load(jsonFile)
if not getRV:
@@ -60,7 +60,7 @@ def main(aimName, evtName, getRV):
elif evt_data['type'] == 'WindForceSpectrum': # creates {forceSpectra}.json
if ('s_target_real' not in data) or ('s_target_imag' not in data):
- raise Exception(
+ raise Exception( # noqa: TRY002
'Target Spectrum info not found in ' + evt_data['filename'] + '.'
)
@@ -70,7 +70,7 @@ def main(aimName, evtName, getRV):
evt_data['type'] == 'ExperimentalWindForces'
): # creates {forceTimehistory}.json here and later overwrites it with {forceSpectra}.json
if ('Fx' not in data) or ('Fy' not in data) or ('Tz' not in data):
- raise Exception(
+ raise Exception( # noqa: TRY002
'Force time histories not found in ' + evt_data['filename'] + '.'
)
@@ -83,13 +83,13 @@ def main(aimName, evtName, getRV):
# case = "spectra"
else:
- raise Exception('Event type [' + evt_data['type'] + '] not found.')
+ raise Exception('Event type [' + evt_data['type'] + '] not found.') # noqa: TRY002
- D = data['D']
- H = data['H']
- B = data['B']
+ D = data['D'] # noqa: N806
+ H = data['H'] # noqa: N806
+ B = data['B'] # noqa: N806
fs = data['fs']
- vRef = data['Vref']
+ vRef = data['Vref'] # noqa: N806
#
# check if model scale is found in the key
@@ -97,31 +97,31 @@ def main(aimName, evtName, getRV):
ms = evt_data.get('modelScale', 0) # model scale
if ms == 0: # when mat file is imported, model scale is not precalculated
- print('Model scale not found. Calculating the unified model scale..')
- D_full = aim_data['GeneralInformation']['depth']
- H_full = aim_data['GeneralInformation']['height']
- B_full = aim_data['GeneralInformation']['width']
+ print('Model scale not found. Calculating the unified model scale..') # noqa: T201
+ D_full = aim_data['GeneralInformation']['depth'] # noqa: N806
+ H_full = aim_data['GeneralInformation']['height'] # noqa: N806
+ B_full = aim_data['GeneralInformation']['width'] # noqa: N806
ms = H_full / H
- print(f'Model scaling factor of {ms:.2} is used')
+ print(f'Model scaling factor of {ms:.2} is used') # noqa: T201
if ((ms != D_full / D) or (ms != B_full / B)) and getRV:
- print(
+ print( # noqa: T201
f'Warning: target-data geometry scaling ratio is inconsistent: H={H_full / H:.2}, B={B_full / B:.2}, D={D_full / D:.2}'
)
if case == 'timeHistory':
# Tw = 4 # duration of window (sec) - user defined - smaller window leads to more smoothing
# overlap = 0.5 # 50% overlap - user defined
- Tw = evt_data['windowSize']
+ Tw = evt_data['windowSize'] # noqa: N806
overlap = evt_data['overlapPerc'] / 100
- Fx = np.array(data['Fx'])
- Fy = np.array(data['Fy'])
- Tz = np.array(data['Tz'])
+ Fx = np.array(data['Fx']) # noqa: N806
+ Fy = np.array(data['Fy']) # noqa: N806
+ Tz = np.array(data['Tz']) # noqa: N806
- t = data['t']
- N = Fx.shape[1]
+ t = data['t'] # noqa: F841
+ N = Fx.shape[1] # noqa: N806
nfloors = Fx.shape[0]
- nfloors_GI = aim_data['GeneralInformation']['NumberOfStories']
+ nfloors_GI = aim_data['GeneralInformation']['NumberOfStories'] # noqa: N806
if nfloors != nfloors_GI:
err_exit(
@@ -134,17 +134,17 @@ def main(aimName, evtName, getRV):
s_target = s_target_real + 1j * s_target_imag
f_target = np.array(data['f_target'])
norm_all = np.array(data['norm_all'])
- comp_CFmean = np.array(data['comp_CFmean'])
+ comp_CFmean = np.array(data['comp_CFmean']) # noqa: N806
elif case == 'PODmodes':
- V_imag = np.array(data['V_imag'])
- V_real = np.array(data['V_real'])
- V = V_real + 1j * V_imag
- D1 = np.array(data['D1'])
- SpeN = data['SpeN']
+ V_imag = np.array(data['V_imag']) # noqa: N806
+ V_real = np.array(data['V_real']) # noqa: N806
+ V = V_real + 1j * V_imag # noqa: N806
+ D1 = np.array(data['D1']) # noqa: N806
+ SpeN = data['SpeN'] # noqa: N806
f_target = np.array(data['f_target'])
norm_all = np.array(data['norm_all'])
- comp_CFmean = np.array(data['comp_CFmean'])
+ comp_CFmean = np.array(data['comp_CFmean']) # noqa: N806
#
# Below here is fully parameterized
#
@@ -153,7 +153,7 @@ def main(aimName, evtName, getRV):
# Compute the basic quantities
#
- dtm = 1 / fs # time step model scale
+ dtm = 1 / fs # time step model scale # noqa: F841
fc = fs / 2 # Nyquist Frequency (Hz) wind tunnel
fp = fs / ms # scaled frequency
fcut = fc / ms # scaled Nyquist frequency
@@ -161,10 +161,10 @@ def main(aimName, evtName, getRV):
ndir = 3 # number of coordinate axes (X,Y,Z)
if case == 'timeHistory': # Experimental wind forces
- T = N / fs # duration of simulation in model scale (s)
+ T = N / fs # duration of simulation in model scale (s) # noqa: N806, F841
ncomp = nfloors * ndir # total number of force components
- elif case == 'spectra' or case == 'PODmodes':
+ elif case == 'spectra' or case == 'PODmodes': # noqa: PLR1714
ncomp = comp_CFmean.shape[0]
nfloors = int(ncomp / ndir)
@@ -175,26 +175,26 @@ def main(aimName, evtName, getRV):
l_mo = int(
np.round(ncomp * ((perc_mod) / 100) + 1.0e-10)
) # small value added to make .5 round up
- if l_mo > 100 or l_mo < 0:
- msg = 'Error: Number of modes should be equal or less than the number of components'
+ if l_mo > 100 or l_mo < 0: # noqa: PLR2004
+ msg = 'Error: Number of modes should be equal or less than the number of components' # noqa: F841
- print('Number of modes = ' + str(l_mo))
+ print('Number of modes = ' + str(l_mo)) # noqa: T201
#
# Scaling building geometry
#
- B_full = B * ms # full scale
- D_full = D * ms # full scale
- H_full = H * ms # full scale
- MaxD_full = max(D_full, B_full) # full scale
+ B_full = B * ms # full scale # noqa: N806
+ D_full = D * ms # full scale # noqa: N806
+ H_full = H * ms # full scale # noqa: N806
+ MaxD_full = max(D_full, B_full) # full scale # noqa: N806
#
# Get CPSD
#
if case == 'timeHistory':
- [s_target, f_target, norm_all, comp_CFmean, Fx_full, Fy_full, Tz_full] = (
+ [s_target, f_target, norm_all, comp_CFmean, Fx_full, Fy_full, Tz_full] = ( # noqa: N806
learn_CPSD(
Fx,
Fy,
@@ -220,12 +220,12 @@ def main(aimName, evtName, getRV):
# Eigen decomposition
#
- if (case == 'timeHistory') or (case == 'spectra'):
- V, D1, SpeN = perform_POD(s_target, f_target, ncomp, l_mo)
+ if (case == 'timeHistory') or (case == 'spectra'): # noqa: PLR1714
+ V, D1, SpeN = perform_POD(s_target, f_target, ncomp, l_mo) # noqa: N806
if getRV:
# # let us overwrite the json file.
- createPODJson(
+ createPODJson( # noqa: F405
filename,
V,
D1,
@@ -245,30 +245,30 @@ def main(aimName, evtName, getRV):
#
f_full = f_target[0:] # don't exclude freq = 0 Hz
- f_vH = (V_H / vRef) * f_full # scaled freq. (Hz)
- V_vH = V # scaled eigenmodes
- D_vH = (V_H / vRef) ** 3 * D1 # scaled eigenvalues
- theta_vH = np.arctan2(np.imag(V_vH), np.real(V_vH)) # scaled theta
+ f_vH = (V_H / vRef) * f_full # scaled freq. (Hz) # noqa: N806
+ V_vH = V # scaled eigenmodes # noqa: N806
+ D_vH = (V_H / vRef) ** 3 * D1 # scaled eigenvalues # noqa: N806
+ theta_vH = np.arctan2(np.imag(V_vH), np.real(V_vH)) # scaled theta # noqa: N806
fcut_sc = (V_H / vRef) * fcut
f_inc = 1 / T_full # freq. increment (Hz)
# number of freq. points considered
- N_f = round(T_full * fcut_sc) + 1
+ N_f = round(T_full * fcut_sc) + 1 # noqa: N806
dt = 1 / (2 * fcut_sc) # max. time increment to avoid aliasing (s)
- N_t = round(T_full / dt) # number of time points
- fvec = np.arange(0, f_inc * (N_f), f_inc) # frequency line
+ N_t = round(T_full / dt) # number of time points # noqa: N806
+ fvec = np.arange(0, f_inc * (N_f), f_inc) # frequency line # noqa: F841
tvec = np.arange(0, dt * (N_t), dt) # time line
f = f_vH[0:SpeN] # frequencies from the decomposition up to SpeN points (Hz)
nf_dir = np.arange(ncomp) # vector number of components
- Nsim = 1 # Number of realizations to be generated
+ Nsim = 1 # Number of realizations to be generated # noqa: N806
seeds = np.arange(seed, Nsim + seed) # Set seeds for reproducibility
- CF_sim0 = np.zeros((len(seeds), ncomp, N_t))
+ CF_sim0 = np.zeros((len(seeds), ncomp, N_t)) # noqa: N806
for seed_num in range(len(seeds)):
- print(f'Creating Realization # {seed_num + 1} among {len(seeds)} ')
+ print(f'Creating Realization # {seed_num + 1} among {len(seeds)} ') # noqa: T201
t_init = time.time()
- F_jzm = simulation_gaussian(
+ F_jzm = simulation_gaussian( # noqa: N806
ncomp,
N_t,
V_vH,
@@ -290,18 +290,18 @@ def main(aimName, evtName, getRV):
F_jzm # zero-mean force coefficient time series (simulation)
)
- print(f' - Elapsed time: {time.time() - t_init:.3} seconds.\n')
+ print(f' - Elapsed time: {time.time() - t_init:.3} seconds.\n') # noqa: T201
#
# Destandardize force coefficients
#
#
- CF_sim1 = (
+ CF_sim1 = ( # noqa: N806
np.transpose(CF_sim0, (1, 2, 0)) / (V_H / vRef) ** 3 / np.sqrt(V_H / vRef)
) # rescale Force Coefficients
- CF_sim = CF_sim1 * np.transpose(
+ CF_sim = CF_sim1 * np.transpose( # noqa: N806
norm_all[np.newaxis, np.newaxis], (2, 1, 0)
) + np.transpose(
comp_CFmean[np.newaxis, np.newaxis], (2, 1, 0)
@@ -315,7 +315,7 @@ def main(aimName, evtName, getRV):
* (0.5 * air_dens * vRef**2 * H_full * MaxD_full**2 / 2),
)
)
- F_sim = (
+ F_sim = ( # noqa: N806
(V_H / vRef) ** 2 * CF_sim * static_pres
) # simulated forces at full scale wind speed
@@ -326,9 +326,9 @@ def main(aimName, evtName, getRV):
#
if getRV:
- F_sim = np.zeros(F_sim.shape)
+ F_sim = np.zeros(F_sim.shape) # noqa: N806
- evtInfo = {}
+ evtInfo = {} # noqa: N806
evtInfo['dT'] = tvec[1] - tvec[0]
evtInfo['numSteps'] = tvec.shape[0]
@@ -341,9 +341,9 @@ def main(aimName, evtName, getRV):
for nf in range(nfloors):
id_timeseries += 1
my_pattern = {}
- if nd == 0 or nd == 1:
+ if nd == 0 or nd == 1: # noqa: PLR1714
my_pattern['dof'] = nd + 1 # x and y dir
- elif nd == 2:
+ elif nd == 2: # noqa: PLR2004
my_pattern['dof'] = 6 # moments
my_pattern['floor'] = str(nf + 1)
@@ -359,8 +359,8 @@ def main(aimName, evtName, getRV):
evtInfo['subtype'] = 'ExperimentalWindForces'
evtInfo['type'] = 'Wind'
- timeSeries = []
- for id in range(id_timeseries):
+ timeSeries = [] # noqa: N806
+ for id in range(id_timeseries): # noqa: A001
my_ts = {}
my_ts['dT'] = tvec[1] - tvec[0]
my_ts['name'] = str(id + 1)
@@ -370,11 +370,11 @@ def main(aimName, evtName, getRV):
cur_floor = ts_floor_info[id]
my_ts['data'] = F_sim[(cur_dof) * nfloors + cur_floor, :, 0].tolist()
- timeSeries += [my_ts]
+ timeSeries += [my_ts] # noqa: N806
evtInfo['timeSeries'] = timeSeries
- with open(evtName, 'w', encoding='utf-8') as fp:
+ with open(evtName, 'w', encoding='utf-8') as fp: # noqa: PTH123
json.dump({'Events': [evtInfo]}, fp)
"""
@@ -434,73 +434,73 @@ def main(aimName, evtName, getRV):
"""
-def perform_POD(s_target, f_target, ncomp, l_mo):
- S_F = s_target[:, :, 0:] # do not exclude freq = 0 Hz
+def perform_POD(s_target, f_target, ncomp, l_mo): # noqa: N802, D103
+ S_F = s_target[:, :, 0:] # do not exclude freq = 0 Hz # noqa: N806
f_full = f_target[0:] # do not exclude freq = 0 Hz
- SpeN = f_full.shape[0] # number of frequency points (freq = 0 Hz included)
+ SpeN = f_full.shape[0] # number of frequency points (freq = 0 Hz included) # noqa: N806
- Vs = np.zeros((ncomp, ncomp, SpeN), dtype='complex_')
- Ds = np.zeros((ncomp, ncomp, SpeN))
+ Vs = np.zeros((ncomp, ncomp, SpeN), dtype='complex_') # noqa: N806
+ Ds = np.zeros((ncomp, ncomp, SpeN)) # noqa: N806
for ii in range(
SpeN
): # eigen - decomposition at every frequency of CPSD matrix and sort them
- [D_all, V_all] = np.linalg.eig(S_F[:, :, ii])
+ [D_all, V_all] = np.linalg.eig(S_F[:, :, ii]) # noqa: N806
ind = np.argsort(D_all)
Ds[:, :, ii] = np.real(np.diag(D_all[ind]))
Vs[:, :, ii] = V_all[:, ind]
# Truncation
- V = np.zeros((ncomp, l_mo, SpeN), dtype='complex_')
- D0 = np.zeros((l_mo, l_mo, SpeN))
+ V = np.zeros((ncomp, l_mo, SpeN), dtype='complex_') # noqa: N806
+ D0 = np.zeros((l_mo, l_mo, SpeN)) # noqa: N806
for tt in range(l_mo):
V[:, tt, :] = Vs[:, ncomp - 1 - tt, :]
D0[tt, tt, :] = Ds[ncomp - 1 - tt, ncomp - 1 - tt, :]
- D1 = np.zeros((l_mo, 1, SpeN))
+ D1 = np.zeros((l_mo, 1, SpeN)) # noqa: N806
for ii in range(SpeN):
D1[:, 0, ii] = np.diag(D0[:, :, ii])
return V, D1, SpeN
-def learn_CPSD(
- Fx,
- Fy,
- Tz,
+def learn_CPSD( # noqa: N802, D103, PLR0913
+ Fx, # noqa: N803
+ Fy, # noqa: N803
+ Tz, # noqa: N803
ms,
air_dens,
- vRef,
- H_full,
- B_full,
- D_full,
- MaxD_full,
+ vRef, # noqa: N803
+ H_full, # noqa: N803
+ B_full, # noqa: N803
+ D_full, # noqa: N803
+ MaxD_full, # noqa: N803
fs,
- Tw,
+ Tw, # noqa: N803
overlap,
fp,
- V_H,
+ V_H, # noqa: N803
fcut,
- T_full,
+ T_full, # noqa: N803
):
- Fx_full = ms**2 * Fx # full scale Fx(N)
- Fy_full = ms**2 * Fy # full scale Fy(N)
- Tz_full = ms**3 * Tz # full scale Tz(N.m)
+ Fx_full = ms**2 * Fx # full scale Fx(N) # noqa: N806
+ Fy_full = ms**2 * Fy # full scale Fy(N) # noqa: N806
+ Tz_full = ms**3 * Tz # full scale Tz(N.m) # noqa: N806
# Force Coefficients (unitless)
- CFx = Fx_full / (0.5 * air_dens * vRef**2 * H_full * B_full)
- CFy = Fy_full / (0.5 * air_dens * vRef**2 * H_full * D_full)
- CTz = Tz_full / (0.5 * air_dens * vRef**2 * H_full * MaxD_full**2 / 2)
+ CFx = Fx_full / (0.5 * air_dens * vRef**2 * H_full * B_full) # noqa: N806
+ CFy = Fy_full / (0.5 * air_dens * vRef**2 * H_full * D_full) # noqa: N806
+ CTz = Tz_full / (0.5 * air_dens * vRef**2 * H_full * MaxD_full**2 / 2) # noqa: N806
# Mean Force Coefficients
- CFx_mean = np.mean(CFx, axis=1)
- CFy_mean = np.mean(CFy, axis=1)
- CTz_mean = np.mean(CTz, axis=1)
+ CFx_mean = np.mean(CFx, axis=1) # noqa: N806
+ CFy_mean = np.mean(CFy, axis=1) # noqa: N806
+ CTz_mean = np.mean(CTz, axis=1) # noqa: N806
- comp_CFmean = np.concatenate([CFx_mean, CFy_mean, CTz_mean])
+ comp_CFmean = np.concatenate([CFx_mean, CFy_mean, CTz_mean]) # noqa: N806
- RF = 3.5 # Reduction Factor
+ RF = 3.5 # Reduction Factor # noqa: N806
# Normalization factor
xnorm = np.std(CFx - CFx_mean[np.newaxis].T, axis=1) * RF
@@ -509,10 +509,10 @@ def learn_CPSD(
norm_all = np.concatenate([xnorm, ynorm, tornorm])
# Standardization of Forces (force coefficients have about the same range)
- CFx_norm = (CFx - np.mean(CFx, axis=1)[np.newaxis].T) / xnorm[np.newaxis].T
- CFy_norm = (CFy - np.mean(CFy, axis=1)[np.newaxis].T) / ynorm[np.newaxis].T
- CTz_norm = (CTz - np.mean(CTz, axis=1)[np.newaxis].T) / tornorm[np.newaxis].T
- Components = np.vstack([CFx_norm, CFy_norm, CTz_norm]).T
+ CFx_norm = (CFx - np.mean(CFx, axis=1)[np.newaxis].T) / xnorm[np.newaxis].T # noqa: N806
+ CFy_norm = (CFy - np.mean(CFy, axis=1)[np.newaxis].T) / ynorm[np.newaxis].T # noqa: N806
+ CTz_norm = (CTz - np.mean(CTz, axis=1)[np.newaxis].T) / tornorm[np.newaxis].T # noqa: N806
+ Components = np.vstack([CFx_norm, CFy_norm, CTz_norm]).T # noqa: N806
# Smoothed target CPSD
wind_size = fs * Tw
@@ -521,7 +521,7 @@ def learn_CPSD(
# nfft = int(wind_size)
fcut_sc = (V_H / vRef) * fcut
dt = 1 / (2 * fcut_sc) # max. time increment to avoid aliasing (s)
- N_t = round(T_full / dt) # number of time points
+ N_t = round(T_full / dt) # number of time points # noqa: N806
nfft = N_t
t_init = time.time()
@@ -530,20 +530,20 @@ def learn_CPSD(
Components, Components, wind_size, nover, nfft, fp
)
- print(f' - Elapsed time: {time.time() - t_init:.3} seconds.\n')
+ print(f' - Elapsed time: {time.time() - t_init:.3} seconds.\n') # noqa: T201
return s_target, f_target, norm_all, comp_CFmean, Fx_full, Fy_full, Tz_full
-def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp):
+def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp): # noqa: N803, D103
window = windows.hann(int(wind_size))
ncombs1 = Components1.shape[1]
ncombs2 = Components2.shape[1]
- nSampPoints = int(nfft / 2 + 1)
+ nSampPoints = int(nfft / 2 + 1) # noqa: N806
s_target = np.zeros((ncombs1, ncombs2, nSampPoints), dtype='complex_')
- print('Training cross power spectrum density..')
+ print('Training cross power spectrum density..') # noqa: T201
for nc2 in range(ncombs2):
for nc1 in range(ncombs1):
[f_target, s_tmp] = csd(
@@ -559,21 +559,21 @@ def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp):
return s_target, f_target
-def simulation_gaussian(
+def simulation_gaussian( # noqa: D103, PLR0913
ncomp,
- N_t,
- V_vH,
- D_vH,
- theta_vH,
+ N_t, # noqa: N803
+ V_vH, # noqa: N803
+ D_vH, # noqa: N803
+ theta_vH, # noqa: N803
nf_dir,
- N_f,
+ N_f, # noqa: N803
f_inc,
f,
l_mo,
tvec,
- SpeN,
- V_H,
- vRef,
+ SpeN, # noqa: ARG001, N803
+ V_H, # noqa: N803
+ vRef, # noqa: N803
seed,
seed_num,
):
@@ -581,10 +581,10 @@ def simulation_gaussian(
# Set Seed
#
- folderName = os.path.basename(
- os.getcwd()
+ folderName = os.path.basename( # noqa: PTH119, N806
+ os.getcwd() # noqa: PTH109
) # Let's get n from workdir.n and add this to the seed
- sampNum = folderName.split('.')[-1]
+ sampNum = folderName.split('.')[-1] # noqa: N806
if sampNum == 'templatedir':
np.random.seed(seed[seed_num])
@@ -592,18 +592,18 @@ def simulation_gaussian(
np.random.seed(seed[seed_num] + int(sampNum))
# initialize force coefficient matrix
- F_jzm = np.zeros((ncomp, N_t))
+ F_jzm = np.zeros((ncomp, N_t)) # noqa: N806
f_tmp = np.linspace(0, (N_f - 1) * f_inc, N_f)
for m in range(l_mo):
mo = m # current mode #
- Vmo = V_vH[nf_dir, mo, :] # eigenvector for mode mo
+ Vmo = V_vH[nf_dir, mo, :] # eigenvector for mode mo # noqa: N806
# Dmo = D_vH[mo, 0,:] # eigenvalue for mode mo
# To avoid nan when calculating VDmo
- Dmo = D_vH[mo, 0, :] + 1j * 0
+ Dmo = D_vH[mo, 0, :] + 1j * 0 # noqa: N806
thetmo = theta_vH[nf_dir, mo, :] # theta for mode mo
- VDmo = (
+ VDmo = ( # noqa: N806
np.sqrt((V_H / vRef) ** 3)
* np.abs(Vmo)
* (np.ones((ncomp, 1)) * np.sqrt(Dmo))
@@ -614,104 +614,104 @@ def simulation_gaussian(
# Loop over floors
# g_jm = np.zeros((N_t, ncomp),dtype = 'complex_')
- F_jm = np.zeros((ncomp, N_t))
+ F_jm = np.zeros((ncomp, N_t)) # noqa: N806
coef = np.sqrt(2) * np.sqrt(f_inc) * np.exp(1j * varth)
coef2 = np.exp(1j * ((mo + 1) / l_mo * f_inc) * tvec)
- fVDmo = interp1d(f, VDmo, kind='linear', fill_value='extrapolate')
+ fVDmo = interp1d(f, VDmo, kind='linear', fill_value='extrapolate') # noqa: N806
fthetmo = interp1d(f, thetmo, kind='linear', fill_value='extrapolate')
- fV_interp = np.abs(fVDmo(f_tmp))
+ fV_interp = np.abs(fVDmo(f_tmp)) # noqa: N806
fthet_interp = np.exp((1j) * (fthetmo(f_tmp)))
for j in range(ncomp):
# l denotes a particular freq. point
# m denotes a particular mode
# j denotes a particular floor
- fVDmo = interp1d(f, VDmo[j, :], kind='linear', fill_value='extrapolate')
+ fVDmo = interp1d(f, VDmo[j, :], kind='linear', fill_value='extrapolate') # noqa: N806
fthetmo = interp1d(
f, thetmo[j, :], kind='linear', fill_value='extrapolate'
)
- B_jm = np.zeros((N_t,), dtype='complex_')
+ B_jm = np.zeros((N_t,), dtype='complex_') # noqa: N806
B_jm[0:N_f] = coef * fV_interp[j, :] * fthet_interp[j, :]
g_jm = np.fft.ifft(B_jm) * N_t
F_jm[j, :] = np.real(g_jm * coef2)
# sum up F from different modes (zero - mean)
- F_jzm = F_jzm + F_jm
+ F_jzm = F_jzm + F_jm # noqa: N806
return F_jzm
-def err_exit(msg):
- print(msg)
- with open('../workflow.err', 'w') as f:
+def err_exit(msg): # noqa: D103
+ print(msg) # noqa: T201
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write(msg)
- exit(-1)
+ exit(-1) # noqa: PLR1722
if __name__ == '__main__':
# parseWindMatFile("Forces_ANG000_phase1.mat", "Forces_ANG000_phase1.json")
# parseWindMatFile("TargetSpectra_ANG000_phase1.mat", "TargetSpectra_ANG000_phase1.json")
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816, F405
# set filenames
- aimName = sys.argv[2]
- evtName = sys.argv[4]
+ aimName = sys.argv[2] # noqa: N816, F405
+ evtName = sys.argv[4] # noqa: N816, F405
- getRV = False
- for myarg in sys.argv:
+ getRV = False # noqa: N816
+ for myarg in sys.argv: # noqa: F405
if myarg == '--getRV':
- getRV = True
+ getRV = True # noqa: N816
if error_tag and getRV:
- with open('../workflow.err', 'w') as f:
- print('Failed to import module ' + moduleName)
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
+ print('Failed to import module ' + moduleName) # noqa: T201
f.write(
'Failed to import module '
+ moduleName
+ '. Please check the python path in the preference'
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
# if getRV:
# aimName = aimName + ".sc"
try:
main(aimName, evtName, getRV)
- except Exception as err:
+ except Exception as err: # noqa: BLE001
import traceback
if getRV:
- with open('../workflow.err', 'w') as f:
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write(
'Failed in wind load generator preprocessor:'
+ str(err)
+ '...'
+ str(traceback.format_exc())
)
- print(
+ print( # noqa: T201
'Failed in wind load generator preprocessor:'
+ str(err)
+ '...'
+ str(traceback.format_exc())
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
else:
- with open('../dakota.err', 'w') as f:
+ with open('../dakota.err', 'w') as f: # noqa: PTH123
f.write(
'Failed to generate wind load: '
+ str(err)
+ '...'
+ str(traceback.format_exc())
)
- print(
+ print( # noqa: T201
'Failed to generate wind load:'
+ str(err)
+ '...'
+ str(traceback.format_exc())
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
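For reference, the core of simulation_gaussian() above is a spectral-representation step: random-phase complex amplitudes are placed on the first N_f frequency lines of a length-N_t array, and np.fft.ifft(B_jm) * N_t turns them into one time-domain realization. A single-mode sketch of that step; the sizes, the flat amplitude, and the seed are assumptions, not values from the patch:

import numpy as np

rng = np.random.default_rng(0)
N_f, N_t, f_inc = 129, 512, 1 / 64   # frequency lines, time points, freq. increment (assumed)
amp = np.ones(N_f)                   # stand-in for the interpolated |V| * sqrt(D) of one mode
varth = 2 * np.pi * rng.random(N_f)  # random phase angles, as in the patch
B = np.zeros(N_t, dtype=complex)
B[:N_f] = np.sqrt(2) * np.sqrt(f_inc) * amp * np.exp(1j * varth)
g = np.real(np.fft.ifft(B) * N_t)    # one time-domain realization
print(g.shape, f'std = {g.std():.3f}')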
diff --git a/modules/createEVENT/experimentalWindPressures/convertWindMat.py b/modules/createEVENT/experimentalWindPressures/convertWindMat.py
index 1cacbbf7a..81c747429 100644
--- a/modules/createEVENT/experimentalWindPressures/convertWindMat.py
+++ b/modules/createEVENT/experimentalWindPressures/convertWindMat.py
@@ -1,4 +1,4 @@
-# python code to open the .mat file
+# python code to open the .mat file # noqa: INP001, D100
# and put data into a SimCenter JSON file
import json
@@ -8,9 +8,9 @@
import scipy.io as sio
-def parseWindMatFile(matFileIn, windFileOutName):
- dataDir = os.getcwd()
- scriptDir = os.path.dirname(os.path.realpath(__file__))
+def parseWindMatFile(matFileIn, windFileOutName): # noqa: N802, N803, D103
+ dataDir = os.getcwd() # noqa: PTH109, N806, F841
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806, F841
mat_contents = sio.loadmat(matFileIn)
@@ -18,11 +18,11 @@ def parseWindMatFile(matFileIn, windFileOutName):
breadth = float(mat_contents['B'][0])
height = float(mat_contents['H'][0])
fs = float(mat_contents['fs'][0])
- vRef = float(mat_contents['Vref'][0])
+ vRef = float(mat_contents['Vref'][0]) # noqa: N806
if 's_target' in mat_contents:
- case = 'spectra'
- comp_CFmean = np.squeeze(np.array(mat_contents['comp_CFmean']))
+ case = 'spectra' # noqa: F841
+ comp_CFmean = np.squeeze(np.array(mat_contents['comp_CFmean'])) # noqa: N806
norm_all = np.squeeze(np.array(mat_contents['norm_all']))
f_target = np.squeeze(np.array(mat_contents['f_target']))
s_target = np.squeeze(np.array(mat_contents['s_target']))
@@ -41,12 +41,12 @@ def parseWindMatFile(matFileIn, windFileOutName):
)
elif 'Fx' in mat_contents:
- Fx = np.squeeze(np.array(mat_contents['Fx']))
- Fy = np.squeeze(np.array(mat_contents['Fy']))
- Tz = np.squeeze(np.array(mat_contents['Tz']))
+ Fx = np.squeeze(np.array(mat_contents['Fx'])) # noqa: N806
+ Fy = np.squeeze(np.array(mat_contents['Fy'])) # noqa: N806
+ Tz = np.squeeze(np.array(mat_contents['Tz'])) # noqa: N806
t = np.squeeze(np.array(mat_contents['t']))
- myJson = {}
+ myJson = {} # noqa: N806
myJson['D'] = depth
myJson['H'] = height
myJson['B'] = breadth
@@ -57,7 +57,7 @@ def parseWindMatFile(matFileIn, windFileOutName):
myJson['Fy'] = np.array(Fy).tolist()
myJson['Tz'] = np.array(Tz).tolist()
myJson['t'] = np.array(t).tolist()
- with open(windFileOutName, 'w') as f:
+ with open(windFileOutName, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
# file = open(windFileOutName,"w")
@@ -127,33 +127,33 @@ def parseWindMatFile(matFileIn, windFileOutName):
# Check valid JSON file,
validate = True
if validate:
- with open(windFileOutName) as infile:
+ with open(windFileOutName) as infile: # noqa: PTH123
json_data = infile.read()
# Try to parse the JSON data
try:
- json_object = json.loads(json_data)
- print('JSON file is valid')
+ json_object = json.loads(json_data) # noqa: F841
+ print('JSON file is valid') # noqa: T201
except json.decoder.JSONDecodeError:
- print('JSON file is not valid')
+ print('JSON file is not valid') # noqa: T201
-def createSpectraJson(
- windFileOutName,
+def createSpectraJson( # noqa: N802, D103
+ windFileOutName, # noqa: N803
breadth,
depth,
height,
fs,
- vRef,
+ vRef, # noqa: N803
f_target,
s_target,
- comp_CFmean,
+ comp_CFmean, # noqa: N803
norm_all,
):
- ncomp = comp_CFmean.shape[0]
- nf = f_target.shape[0]
+ ncomp = comp_CFmean.shape[0] # noqa: F841
+ nf = f_target.shape[0] # noqa: F841
- myJson = {}
+ myJson = {} # noqa: N806
myJson['D'] = depth
myJson['H'] = height
myJson['B'] = breadth
@@ -166,21 +166,21 @@ def createSpectraJson(
myJson['s_target_real'] = np.real(s_target).tolist()
myJson['s_target_imag'] = np.imag(s_target).tolist()
- with open(windFileOutName, 'w') as f:
+ with open(windFileOutName, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
# Check valid JSON file
validate = True
if validate:
- with open(windFileOutName) as infile:
+ with open(windFileOutName) as infile: # noqa: PTH123
json_data = infile.read()
# Try to parse the JSON data
try:
- json_object = json.loads(json_data)
- print('JSON file is valid')
+ json_object = json.loads(json_data) # noqa: F841
+ print('JSON file is valid') # noqa: T201
except json.decoder.JSONDecodeError:
- print('JSON file is not valid')
+ print('JSON file is not valid') # noqa: T201
# file = open(windFileOutName,"w")
# file.write("{")
@@ -251,21 +251,21 @@ def createSpectraJson(
# file.close()
-def createPODJson(
+def createPODJson( # noqa: N802, D103
filename,
- V,
- D1,
- SpeN,
+ V, # noqa: N803
+ D1, # noqa: N803
+ SpeN, # noqa: N803
f_target,
norm_all,
- D,
- H,
- B,
+ D, # noqa: N803
+ H, # noqa: N803
+ B, # noqa: N803
fs,
- vRef,
- comp_CFmean,
+ vRef, # noqa: N803
+ comp_CFmean, # noqa: N803
):
- myJson = {}
+ myJson = {} # noqa: N806
myJson['V_imag'] = np.imag(V).tolist()
myJson['V_real'] = np.real(V).tolist()
myJson['D1'] = D1.tolist()
@@ -279,5 +279,5 @@ def createPODJson(
myJson['fs'] = fs
myJson['Vref'] = vRef
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
json.dump(myJson, f)
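One detail worth noting in createSpectraJson: JSON has no complex type, so the complex CPSD is stored as the s_target_real / s_target_imag pair and reassembled on read (experimentalWindForces.py does s_target = s_target_real + 1j * s_target_imag). A tiny round-trip sketch of that convention, with made-up array values:

import json
import numpy as np

s = np.array([[1 + 2j, 3 - 1j]])  # stand-in for a complex CPSD slice
blob = json.dumps({'s_target_real': np.real(s).tolist(),
                   's_target_imag': np.imag(s).tolist()})
d = json.loads(blob)
s_back = np.array(d['s_target_real']) + 1j * np.array(d['s_target_imag'])
assert np.allclose(s, s_back)  # lossless up to float precision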
diff --git a/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py b/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
index af2cb3168..b6325e8b9 100644
--- a/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
+++ b/modules/createEVENT/experimentalWindPressures/experimentalWindPressures.py
@@ -1,12 +1,12 @@
-import json
+import json # noqa: INP001, D100
import os
import time
try:
- moduleName = 'numpy'
+ moduleName = 'numpy' # noqa: N816
import numpy as np
- moduleName = 'scipy'
+ moduleName = 'scipy' # noqa: N816
import os
from scipy import interpolate
@@ -15,28 +15,28 @@
from scipy.stats import gaussian_kde, genpareto, norm
error_tag = False # global variable
-except:
+except: # noqa: E722
error_tag = True
-from convertWindMat import *
+from convertWindMat import * # noqa: F403
-errPath = './workflow.err' # error file name
-sys.stderr = open(
+errPath = './workflow.err' # error file name # noqa: N816
+sys.stderr = open( # noqa: SIM115, PTH123, F405
errPath, 'w'
) # redirecting stderr (this way we can capture all sorts of python errors)
-def err_exit(msg):
- print('Failed in wind load generator: ' + msg) # display in stdout
+def err_exit(msg): # noqa: D103
+ print('Failed in wind load generator: ' + msg) # display in stdout # noqa: T201
print(
'Failed in wind load generator: ' + msg,
- file=sys.stderr,
+ file=sys.stderr, # noqa: F405
) # display in stderr
- exit(-1) # exit with non-zero exit code
+ exit(-1) # exit with non-zero exit code # noqa: PLR1722
-def main(aimName, evtName, getRV):
- with open(aimName, encoding='utf-8') as f:
+def main(aimName, evtName, getRV): # noqa: C901, N803, D103, PLR0915
+ with open(aimName, encoding='utf-8') as f: # noqa: PTH123
aim_data = json.load(f)
evt_data = aim_data['Events'][0]
@@ -48,16 +48,16 @@ def main(aimName, evtName, getRV):
#
# 4*Vref, wind speed at full scale (m/s)
- V_H = evt_data['windSpeed']
- T_full = evt_data[
+ V_H = evt_data['windSpeed'] # noqa: N806
+ T_full = evt_data[ # noqa: N806
'fullScaleDuration'
] # 1600, Duration of wind pressure realization at full scale (s)
- # TODO check if there are recommended modes
+ # TODO check if there are recommended modes # noqa: TD002, TD004
perc_mod = (
evt_data['modePercent'] / 100
) # percentage of modes to include in the simulation
seed = evt_data['seed'] # Set seeds for reproducibility
- Tw = evt_data[
+ Tw = evt_data[ # noqa: N806
'windowSize'
] # 4, window size/duration (sec) - smaller window leads to more smoothing - model scale
overlap = evt_data['overlapPerc'] / 100 # 0.5 , 50% overlap - user defined
@@ -71,28 +71,28 @@ def main(aimName, evtName, getRV):
evt_data['selectedTaps']
) # np.arange(91,150+1) - 1 , to start from zero # selected taps for simulation (1:510 if all taps are included)
tap = len(selected_taps)
- filtHz = 100 # if applied - filtering high-frequency noise - model scale
+ filtHz = 100 # if applied - filtering high-frequency noise - model scale # noqa: N806
# set equal to 0 if not applied
l_mo = int(np.ceil(tap * perc_mod)) # number of modes included in the simulation
- if l_mo > 100 or l_mo < 0:
+ if l_mo > 100 or l_mo < 0: # noqa: PLR2004
err_exit(
'Number of modes should be equal or less than the number of components'
)
- print('Number of modes = ' + str(l_mo))
+ print('Number of modes = ' + str(l_mo)) # noqa: T201
#
# Parameters
#
- tailTrshd = 5 # Percentage of tail threshold on both tails - Fixed value for all time series
+ tailTrshd = 5 # Percentage of tail threshold on both tails - Fixed value for all time series # noqa: N806
nl = tailTrshd / 100 # Lower Tail Threshold
nu = 1 - nl # Upper Tail Threshold
if getRV:
- print('Running Get RV')
+ print('Running Get RV') # noqa: T201
do_parallel = True
- runType = aim_data['runType']
+ runType = aim_data['runType'] # noqa: N806
if do_parallel:
#
@@ -103,10 +103,10 @@ def main(aimName, evtName, getRV):
from multiprocessing import Pool
n_processor = os.cpu_count()
- print('Starting pool')
+ print('Starting pool') # noqa: T201
tmp = time.time()
pool = Pool(n_processor)
- print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n') # noqa: T201
else:
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor
@@ -116,27 +116,27 @@ def main(aimName, evtName, getRV):
pool = MPIPoolExecutor()
if filename.endswith('.mat'):
- pressure_data = sio.loadmat(filename)
+ pressure_data = sio.loadmat(filename) # noqa: F405
for key in pressure_data:
# print(key)
if not key.startswith('__'):
pressure_data[key] = pressure_data[key][0]
elif filename.endswith('.json'):
- with open(filename, encoding='utf-8') as jsonFile:
+ with open(filename, encoding='utf-8') as jsonFile: # noqa: PTH123, N806
pressure_data = json.load(jsonFile)
fs = np.squeeze(pressure_data['frequency'])
- Vref = np.squeeze(pressure_data['windSpeed'])
- Td = np.squeeze(pressure_data['period'])
+ Vref = np.squeeze(pressure_data['windSpeed']) # noqa: N806
+ Td = np.squeeze(pressure_data['period']) # noqa: N806
pressure_json = pressure_data['pressureCoefficients']
dt = 1 / fs
tvec = np.arange(0, Td, dt) + dt
- Cp_pf = np.zeros((len(tvec), len(pressure_json)))
+ Cp_pf = np.zeros((len(tvec), len(pressure_json))) # noqa: N806
id_list = set()
for tap_info in pressure_json:
- id = np.squeeze(tap_info['id'])
+ id = np.squeeze(tap_info['id']) # noqa: A001
data = np.squeeze(tap_info['data'])
Cp_pf[:, id - 1] = data
id_list.add(int(id))
@@ -155,17 +155,17 @@ def main(aimName, evtName, getRV):
"""
if ms == 0: # when mat file is imported, model scale is not precalculated
- print('Model scale not found. Calculating the unified model scale..')
- D = np.squeeze(pressure_data['depth'])
- H = np.squeeze(pressure_data['height'])
- B = np.squeeze(pressure_data['breadth'])
- D_full = aim_data['GeneralInformation']['depth']
- H_full = aim_data['GeneralInformation']['height']
- B_full = aim_data['GeneralInformation']['width']
+ print('Model scale not found. Calculating the unified model scale..') # noqa: T201
+ D = np.squeeze(pressure_data['depth']) # noqa: N806
+ H = np.squeeze(pressure_data['height']) # noqa: N806
+ B = np.squeeze(pressure_data['breadth']) # noqa: N806
+ D_full = aim_data['GeneralInformation']['depth'] # noqa: N806
+ H_full = aim_data['GeneralInformation']['height'] # noqa: N806
+ B_full = aim_data['GeneralInformation']['width'] # noqa: N806
ms = H_full / H
- print(f'Model scaling factor of {ms:.2f} is used')
+ print(f'Model scaling factor of {ms:.2f} is used') # noqa: T201
if ((ms != D_full / D) or (ms != B_full / B)) and getRV:
- print(
+ print( # noqa: T201
f'Warning: target-data geometry scaling ratio is inconsistent: H={H_full / H:.2}, B={B_full / B:.2}, D={D_full / D:.2}'
)
@@ -176,7 +176,7 @@ def main(aimName, evtName, getRV):
# Values for paretotails function
- N = np.size(Cp_pf, 1) # total number of data points
+ N = np.size(Cp_pf, 1) # total number of data points # noqa: N806, F841
fc = fs / 2 # Nyquist Frequency (Hz) wind tunnel
fp = fs / ms # scaled frequency
fcut = fc / ms # scaled Nyquist frequency
@@ -188,13 +188,13 @@ def main(aimName, evtName, getRV):
# filtering added
if filtHz > 0:
n = 2
- Hz = filtHz
- Wn = Hz / (fs / 2)
+ Hz = filtHz # noqa: N806
+ Wn = Hz / (fs / 2) # noqa: N806
[b, a] = butter(n, Wn)
x = Cp_pf - np.mean(Cp_pf, axis=0)
# y = filter(b, a, x)
y = lfilter(b, a, x, axis=0)
- Cp = y + np.mean(Cp_pf, axis=0)
+ Cp = y + np.mean(Cp_pf, axis=0) # noqa: N806
#######################################################################################################################
# Standardization of wind records
@@ -203,23 +203,23 @@ def main(aimName, evtName, getRV):
# when standardized, requiring fewer modes in the simulation.
# Pressure Coefficients Time historites
- Cp_std = np.std(Cp, axis=0) # std of time series for later use
+ Cp_std = np.std(Cp, axis=0) # std of time series for later use # noqa: N806
# mean of time series for later use
- Cp_mean = np.mean(Cp, axis=0)
+ Cp_mean = np.mean(Cp, axis=0) # noqa: N806
# standardize Cp time series such that mean = 0 and std = 1
# for all taps.
# Cp_norm = np.normalize(Cp)
- row_sums = Cp.sum(axis=1)
- Cp_norm = (Cp - Cp_mean) / Cp_std
+ row_sums = Cp.sum(axis=1) # noqa: F841
+ Cp_norm = (Cp - Cp_mean) / Cp_std # noqa: N806
# Smoothed target CPSD
wind_size = fs * Tw
nover = np.round(overlap * wind_size)
fcut_sc = (V_H / Vref) * fcut # scaled cut-off frequency
dt = 1 / (2 * fcut_sc) # max. time increment to avoid aliasing (s)
- N_t = int(np.round(T_full / dt)) # number of time points
+ N_t = int(np.round(T_full / dt)) # number of time points # noqa: N806
nfft = N_t
#
@@ -232,25 +232,25 @@ def main(aimName, evtName, getRV):
if out > 0:
d = np.concatenate([d, np.array([d[-1] + out])])
- # TODO: dealing with gpuArray, gather
- nSampPoints = int(nfft / 2 + 1)
+ # TODO: dealing with gpuArray, gather # noqa: TD002
+ nSampPoints = int(nfft / 2 + 1) # noqa: N806
s_target = np.zeros(
(len(selected_taps), len(selected_taps), nSampPoints), dtype='complex_'
)
- startTime = time.time()
- # TODO: works only if the selected taps are continuous
+ startTime = time.time() # noqa: N806, F841
+ # TODO: works only if the selected taps are continuous # noqa: TD002
selected_taps_tmp = np.concatenate(
[selected_taps, [selected_taps[-1] + 1]]
) # zero is dummy that will not appear in the analysis
- print('Training cross power spectrum density..')
+ print('Training cross power spectrum density..') # noqa: T201
t_init = time.time()
nloop = (len(d) - 1) * (len(d) - 1)
for i in range(1, len(d)):
for j in range(1, len(d)):
if np.mod((i - 1) * (len(d) - 1) + j, round(nloop / 10)) == 0:
- print(
+ print( # noqa: T201
f'{((i - 1) * (len(d) - 1) + j) / nloop * 100:.0f} % completed'
)
@@ -272,35 +272,35 @@ def main(aimName, evtName, getRV):
# cpsd_all[kk,ll] = s
s_target[d[i - 1] : d[i], d[j - 1] : d[j]] = s
- print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n') # noqa: T201
- unitLength = aim_data['GeneralInformation']['units']['length']
- unitTime = aim_data['GeneralInformation']['units']['time']
+ unitLength = aim_data['GeneralInformation']['units']['length'] # noqa: N806
+ unitTime = aim_data['GeneralInformation']['units']['time'] # noqa: N806
- print('Performing POD..')
+ print('Performing POD..') # noqa: T201
t_init = time.time()
# Spectral Proper Orthogonal Decomposition
- V, D1, SpeN = perform_POD(s_target, f_target, tap, l_mo, pool)
- print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n')
+ V, D1, SpeN = perform_POD(s_target, f_target, tap, l_mo, pool) # noqa: N806
+ print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n') # noqa: T201
#
# Computing nonGaussian CDFs
#
if do_parallel:
- print('Computing nonGaussian CDF in parallel')
+ print('Computing nonGaussian CDF in parallel') # noqa: T201
tmp = time.time()
iterables = ((Cp_norm[:, selected_taps[i] - 1],) for i in range(tap))
try:
result_objs = list(pool.starmap(getCDF, iterables))
- print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n') # noqa: T201
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
- self.pool.shutdown()
- except Exception:
- sys.exit()
+ self.pool.shutdown() # noqa: F405
+ except Exception: # noqa: BLE001
+ sys.exit() # noqa: F405
my_cdf_vects = np.zeros((1000, tap))
my_cdf_x_range = np.zeros((2, tap))
@@ -309,7 +309,7 @@ def main(aimName, evtName, getRV):
my_cdf_x_range[:, i] = result_objs[i][1]
else:
- print('Computing nonGaussian CDF')
+ print('Computing nonGaussian CDF') # noqa: T201
tmp = time.time()
my_cdf_vects = np.zeros((1000, tap))
my_cdf_x_range = np.zeros((2, tap))
@@ -327,7 +327,7 @@ def main(aimName, evtName, getRV):
Cp_norm[:, selected_taps[i] - 1]
)
- print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n') # noqa: T201
# Simulation of Gaussian Stochastic wind force coefficients
@@ -366,19 +366,19 @@ def main(aimName, evtName, getRV):
# save into a file
#
- if not os.path.exists('../input_File'):
- os.makedirs('../input_File')
- sio.savemat('../input_File/POD_Cp.mat', iterm_json)
+ if not os.path.exists('../input_File'): # noqa: PTH110
+ os.makedirs('../input_File') # noqa: PTH103
+ sio.savemat('../input_File/POD_Cp.mat', iterm_json) # noqa: F405
file_loaded = False
else:
- iterm_json = sio.loadmat('../input_File/POD_Cp.mat')
+ iterm_json = sio.loadmat('../input_File/POD_Cp.mat') # noqa: F405
selected_taps = np.squeeze(iterm_json['selected_taps'])
ms = np.squeeze(iterm_json['ms'])
- V_H = np.squeeze(iterm_json['V_H'])
- T_full = np.squeeze(iterm_json['T_full'])
- Cp_norm = np.squeeze(iterm_json['Cp_norm'])
+ V_H = np.squeeze(iterm_json['V_H']) # noqa: N806
+ T_full = np.squeeze(iterm_json['T_full']) # noqa: N806
+ Cp_norm = np.squeeze(iterm_json['Cp_norm']) # noqa: N806
# Tw =np.squeeze(iterm_json["Tw"])
# overlap =np.squeeze(iterm_json["overlap"])
# nover =np.squeeze(iterm_json["nover"])
@@ -388,14 +388,14 @@ def main(aimName, evtName, getRV):
fcut_sc = np.squeeze(iterm_json['fcut_sc'])
# s_target =np.squeeze(iterm_json["s_target"])
f_target = np.squeeze(iterm_json['f_target'])
- Vref = np.squeeze(iterm_json['Vref'])
- Cp_std = np.squeeze(iterm_json['Cp_std'])
- Cp_mean = np.squeeze(iterm_json['Cp_mean'])
- unitLength = np.squeeze(iterm_json['length'])
- unitTime = np.squeeze(iterm_json['time'])
- V = np.squeeze(iterm_json['V'])
- D1 = iterm_json['D1']
- SpeN = np.squeeze(iterm_json['SpeN'])
+ Vref = np.squeeze(iterm_json['Vref']) # noqa: N806
+ Cp_std = np.squeeze(iterm_json['Cp_std']) # noqa: N806
+ Cp_mean = np.squeeze(iterm_json['Cp_mean']) # noqa: N806
+ unitLength = np.squeeze(iterm_json['length']) # noqa: N806
+ unitTime = np.squeeze(iterm_json['time']) # noqa: N806
+ V = np.squeeze(iterm_json['V']) # noqa: N806
+ D1 = iterm_json['D1'] # noqa: N806
+ SpeN = np.squeeze(iterm_json['SpeN']) # noqa: N806
my_cdf_vects = np.squeeze(iterm_json['my_cdf_vects'])
my_cdf_x_range = np.squeeze(iterm_json['my_cdf_x_range'])
@@ -406,35 +406,35 @@ def main(aimName, evtName, getRV):
selected_taps = np.arange(0, Cp_norm.shape[0])
f_full = f_target[0:] # don't exclude freq = 0 Hz
- f_vH = (V_H / Vref) * f_full # scaled freq. (Hz)
- V_vH = V # scaled eigenmodes
- D_vH = (V_H / Vref) ** 3 * D1 # scaled eigenvalues
- theta_vH = np.arctan2(np.imag(V_vH), np.real(V_vH)) # scaled theta
+ f_vH = (V_H / Vref) * f_full # scaled freq. (Hz) # noqa: N806
+ V_vH = V # scaled eigenmodes # noqa: N806
+ D_vH = (V_H / Vref) ** 3 * D1 # scaled eigenvalues # noqa: N806
+ theta_vH = np.arctan2(np.imag(V_vH), np.real(V_vH)) # scaled theta # noqa: N806
f_inc = 1 / T_full # freq. increment (Hz)
# number of freq. points considered
- N_f = round(T_full * np.squeeze(fcut_sc)) + 1
+ N_f = round(T_full * np.squeeze(fcut_sc)) + 1 # noqa: N806
- N_t = round(T_full / dt) # number of time points
- fvec = np.arange(0, f_inc * (N_f), f_inc) # frequency line
+ N_t = round(T_full / dt) # number of time points # noqa: N806
+ fvec = np.arange(0, f_inc * (N_f), f_inc) # frequency line # noqa: F841
t_vec_sc = np.linspace(0, dt * N_t, N_t) # time line
f = f_vH[0:SpeN] # frequencies from the decomposition up to SpeN points (Hz)
nf_dir = np.arange(tap) # vector number of components
- Nsim = 1 # Number of realizations to be generated
+ Nsim = 1 # Number of realizations to be generated # noqa: N806
seeds = np.arange(seed, Nsim + seed) # Set seeds for reproducibility
#
# Creating Gaussian Realizations
#
- print('Creating Gaussian Realizations')
+ print('Creating Gaussian Realizations') # noqa: T201
t_init = time.time()
- CP_sim = np.zeros((len(seeds), tap, N_t))
+ CP_sim = np.zeros((len(seeds), tap, N_t)) # noqa: N806
for seed_num in range(len(seeds)):
t_init = time.time()
- F_jzm = simulation_gaussian(
+ F_jzm = simulation_gaussian( # noqa: N806
tap,
N_t,
V_vH,
@@ -456,16 +456,16 @@ def main(aimName, evtName, getRV):
F_jzm # zero-mean force coefficient time series (simulation)
)
- print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - t_init:.1f} seconds.\n') # noqa: T201
#
# Creating Non-Gaussian Realizations
#
- print('Creating NonGaussian Realizations')
+ print('Creating NonGaussian Realizations') # noqa: T201
if do_parallel:
- Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds)))
- print(f'Running {tap} simulations in parallel')
+ Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds))) # noqa: N806
+ print(f'Running {tap} simulations in parallel') # noqa: T201
tmp = time.time()
iterables = (
@@ -481,21 +481,21 @@ def main(aimName, evtName, getRV):
)
try:
result_objs = list(pool.starmap(genCP, iterables))
- print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n') # noqa: T201
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
- self.pool.shutdown()
- except Exception:
- sys.exit()
+ self.pool.shutdown() # noqa: F405
+ except Exception: # noqa: BLE001
+ sys.exit() # noqa: F405
- Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds)))
+ Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds))) # noqa: N806
Cp_nongauss_kernel[:, :, 0] = np.array(result_objs)
else:
- Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds)))
+ Cp_nongauss_kernel = np.zeros((tap, CP_sim.shape[2], len(seeds))) # noqa: N806
- print(f'Running {tap} simulations in series')
+ print(f'Running {tap} simulations in series') # noqa: T201
tmp = time.time()
for seed_num in range(len(seeds)): # always 1
for i in range(tap):
@@ -508,11 +508,11 @@ def main(aimName, evtName, getRV):
my_cdf_x_range[:, i],
)
- print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n')
+ print(f' - Elapsed time: {time.time() - tmp:.3f} seconds.\n') # noqa: T201
- Cp_std_tmp = Cp_std[selected_taps - 1][:, np.newaxis, np.newaxis]
- Cp_mean_tmp = Cp_mean[selected_taps - 1][:, np.newaxis, np.newaxis]
- Cp_nongauss = np.transpose(Cp_nongauss_kernel, (0, 2, 1)) * np.tile(
+ Cp_std_tmp = Cp_std[selected_taps - 1][:, np.newaxis, np.newaxis] # noqa: N806
+ Cp_mean_tmp = Cp_mean[selected_taps - 1][:, np.newaxis, np.newaxis] # noqa: N806
+ Cp_nongauss = np.transpose(Cp_nongauss_kernel, (0, 2, 1)) * np.tile( # noqa: N806
Cp_std_tmp, (1, len(seeds), N_t)
) + np.tile(Cp_mean_tmp, (1, len(seeds), N_t)) # destandardize the time series
@@ -526,7 +526,7 @@ def main(aimName, evtName, getRV):
# Save Results
#
- print('Saving results')
+ print('Saving results') # noqa: T201
pressure_data = iterm_json['pressureData']
@@ -601,25 +601,25 @@ def main(aimName, evtName, getRV):
# %% Plots for verification of code
#
- with open('tmpSimCenterLowRiseTPU.json', 'w', encoding='utf-8') as f:
+ with open('tmpSimCenterLowRiseTPU.json', 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(new_json, f)
# curScriptPath = abspath(getsourcefile(lambda:0))
- curScriptPath = os.path.realpath(__file__)
- creatEVENTDir = os.path.dirname(os.path.dirname(curScriptPath))
+ curScriptPath = os.path.realpath(__file__) # noqa: N806
+ creatEVENTDir = os.path.dirname(os.path.dirname(curScriptPath)) # noqa: PTH120, N806
- siteFile = os.path.join(creatEVENTDir, 'LowRiseTPU', 'LowRiseTPU')
+ siteFile = os.path.join(creatEVENTDir, 'LowRiseTPU', 'LowRiseTPU') # noqa: PTH118, N806
command_line = (
f'{siteFile} "--filenameAIM" {aimName} "--filenameEVENT" {evtName}'
)
- print('Processing pressure->force:')
- print(command_line)
+ print('Processing pressure->force:') # noqa: T201
+ print(command_line) # noqa: T201
# run command
try:
- os.system(command_line)
- except:
+ os.system(command_line) # noqa: S605
+ except: # noqa: E722
err_exit('Failed to convert pressure to force.')
# t_sc = ms*(Vref/V_H); #scale wind tunnel time series to compare
@@ -656,38 +656,38 @@ def main(aimName, evtName, getRV):
# plt.ylim([-1400,2000])
# plt.xlim([0,1000])
# plt.show()
- """
+ """ # noqa: W291
-def genCP(Cp_temp, Cp_sim_temp, nl, nu, my_cdf_vect, my_cdf_x_range):
+def genCP(Cp_temp, Cp_sim_temp, nl, nu, my_cdf_vect, my_cdf_x_range): # noqa: N802, N803, D103
#
# combining the loops to directly send temp instead of dist_kde
#
- # TODO; why double?
+ # TODO; why double? # noqa: TD002, TD004
- meanCp = np.mean(Cp_sim_temp)
- stdCp = np.std(Cp_sim_temp)
- F_vvv = (Cp_sim_temp - meanCp) / stdCp
+ meanCp = np.mean(Cp_sim_temp) # noqa: N806
+ stdCp = np.std(Cp_sim_temp) # noqa: N806
+ F_vvv = (Cp_sim_temp - meanCp) / stdCp # noqa: N806
# CDF points from Gaussian distribution
cdf_vvv = norm.cdf(F_vvv, 0, 1)
# force the data to be bounded, due to numerical errors that can happen in Matlab when CDF ~0 or ~1;
- cdf_vvv[cdf_vvv < 0.00001] = 0.00001
- cdf_vvv[cdf_vvv > 0.99999] = 0.99999
+ cdf_vvv[cdf_vvv < 0.00001] = 0.00001 # noqa: PLR2004
+ cdf_vvv[cdf_vvv > 0.99999] = 0.99999 # noqa: PLR2004
# map F_vvv into F_nongauss through inverse cdf of the mix distribution
- # TODO why single precision for cdf_vv?
+ # TODO why single precision for cdf_vv? # noqa: TD002, TD004
return paretotails_icdf(cdf_vvv, nl, nu, Cp_temp, my_cdf_vect, my_cdf_x_range)
-def getCDF(Cp_temp):
+def getCDF(Cp_temp): # noqa: N802, N803, D103
kernel = gaussian_kde(Cp_temp)
kernel_cdf = np.vectorize(lambda x: kernel.integrate_box_1d(-np.inf, x))
my_cdf_x = np.linspace(
min(Cp_temp), max(Cp_temp), 1000
- ) # TODO is 1000 enough?
+ ) # TODO is 1000 enough? # noqa: TD002, TD004
my_cdf_vects = kernel_cdf(my_cdf_x) # Takes too long to evaluate
my_cdf_x_range = [min(Cp_temp), max(Cp_temp)]
@@ -695,7 +695,7 @@ def getCDF(Cp_temp):
return my_cdf_vects, my_cdf_x_range
-def paretotails_icdf(pf, nl, nu, temp, my_cdf_vect, my_cdf_x):
+def paretotails_icdf(pf, nl, nu, temp, my_cdf_vect, my_cdf_x): # noqa: D103
#
# Pareto percentile
#
@@ -715,7 +715,7 @@ def paretotails_icdf(pf, nl, nu, temp, my_cdf_vect, my_cdf_x):
# lower pareto
#
idx1 = np.where(pf < nl)
- myX = -lower_temp
+ myX = -lower_temp # noqa: N806
c, loc, scal = genpareto.fit(myX, loc=np.min(myX))
mydist = genpareto(c=c, loc=loc, scale=scal)
@@ -741,7 +741,7 @@ def paretotails_icdf(pf, nl, nu, temp, my_cdf_vect, my_cdf_x):
#
idx3 = np.where(pf > nu)
- myX = upper_temp
+ myX = upper_temp # noqa: N806
c, loc, scal = genpareto.fit(myX, loc=np.min(myX))
mydist = genpareto(c=c, loc=loc, scale=scal)
@@ -780,19 +780,19 @@ def paretotails_icdf(pf, nl, nu, temp, my_cdf_vect, my_cdf_x):
plt.show()
"""
- return kernel, gpareto_param_lower, gpareto_param_upper
+ return kernel, gpareto_param_lower, gpareto_param_upper # noqa: F405
-def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp):
+def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp): # noqa: N803, D103
window = windows.hann(int(wind_size))
ncombs1 = Components1.shape[1]
ncombs2 = Components2.shape[1]
- nSampPoints = int(nfft / 2 + 1)
+ nSampPoints = int(nfft / 2 + 1) # noqa: N806
- if nfft < 2500:
- print('ERROR: time series is too short. Please provide a longer duration')
- exit(-1)
+ if nfft < 2500: # noqa: PLR2004
+ print('ERROR: time series is too short. Please provide a longer duration') # noqa: T201
+ exit(-1) # noqa: PLR1722
s_target = np.zeros((ncombs1, ncombs2, nSampPoints), dtype='complex_')
@@ -811,14 +811,14 @@ def cpsd_matlab(Components1, Components2, wind_size, nover, nfft, fp):
return s_target, f_target
-def perform_POD(s_target, f_target, ncomp, l_mo, pool):
- S_F = s_target[:, :, 0:] # do not exclude freq = 0 Hz
+def perform_POD(s_target, f_target, ncomp, l_mo, pool): # noqa: N802, D103
+ S_F = s_target[:, :, 0:] # do not exclude freq = 0 Hz # noqa: N806
f_full = f_target[0:] # do not exclude freq = 0 Hz
- SpeN = f_full.shape[0] # number of frequency points (freq = 0 Hz included)
+ SpeN = f_full.shape[0] # number of frequency points (freq = 0 Hz included) # noqa: N806
- Vs = np.zeros((ncomp, ncomp, SpeN), dtype='complex_')
- Ds = np.zeros((ncomp, ncomp, SpeN))
+ Vs = np.zeros((ncomp, ncomp, SpeN), dtype='complex_') # noqa: N806
+ Ds = np.zeros((ncomp, ncomp, SpeN)) # noqa: N806
#
# eigenvalue analysis in parallel
@@ -830,48 +830,48 @@ def perform_POD(s_target, f_target, ncomp, l_mo, pool):
err_exit('Low memory performing POD')
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
- self.pool.shutdown()
- except Exception:
- sys.exit()
+ self.pool.shutdown() # noqa: F405
+ except Exception: # noqa: BLE001
+ sys.exit() # noqa: F405
for ii in range(SpeN):
- D_all = result_objs[ii][0]
- V_all = result_objs[ii][1]
+ D_all = result_objs[ii][0] # noqa: N806
+ V_all = result_objs[ii][1] # noqa: N806
ind = np.argsort(D_all)
Ds[:, :, ii] = np.real(np.diag(D_all[ind]))
Vs[:, :, ii] = V_all[:, ind]
# Truncation
- V = np.zeros((ncomp, l_mo, SpeN), dtype='complex_')
- D0 = np.zeros((l_mo, l_mo, SpeN))
+ V = np.zeros((ncomp, l_mo, SpeN), dtype='complex_') # noqa: N806
+ D0 = np.zeros((l_mo, l_mo, SpeN)) # noqa: N806
for tt in range(l_mo):
V[:, tt, :] = Vs[:, ncomp - 1 - tt, :]
D0[tt, tt, :] = Ds[ncomp - 1 - tt, ncomp - 1 - tt, :]
- D1 = np.zeros((l_mo, 1, SpeN))
+ D1 = np.zeros((l_mo, 1, SpeN)) # noqa: N806
for ii in range(SpeN):
D1[:, 0, ii] = np.diag(D0[:, :, ii])
return V, D1, SpeN
-def simulation_gaussian(
+def simulation_gaussian( # noqa: D103, PLR0913
ncomp,
- N_t,
- V_vH,
- D_vH,
- theta_vH,
+ N_t, # noqa: N803
+ V_vH, # noqa: N803
+ D_vH, # noqa: N803
+ theta_vH, # noqa: N803
nf_dir,
- N_f,
+ N_f, # noqa: N803
f_inc,
f,
l_mo,
tvec,
- SpeN,
- V_H,
- vRef,
+ SpeN, # noqa: ARG001, N803
+ V_H, # noqa: N803
+ vRef, # noqa: N803
seed,
seed_num,
):
@@ -879,10 +879,10 @@ def simulation_gaussian(
# Set Seed
#
- folderName = os.path.basename(
- os.getcwd()
+ folderName = os.path.basename( # noqa: PTH119, N806
+ os.getcwd() # noqa: PTH109
) # Let's get n from workdir.n and add this to the seed
- sampNum = folderName.split('.')[-1]
+ sampNum = folderName.split('.')[-1] # noqa: N806
if not sampNum.isnumeric():
np.random.seed(seed[seed_num])
@@ -894,18 +894,18 @@ def simulation_gaussian(
#
# initialize force coefficient matrix
- F_jzm = np.zeros((ncomp, N_t))
+ F_jzm = np.zeros((ncomp, N_t)) # noqa: N806
f_tmp = np.linspace(0, (N_f - 1) * f_inc, N_f)
for m in range(l_mo):
mo = m # current mode #
- Vmo = V_vH[nf_dir, mo, :] # eigenvector for mode mo
+ Vmo = V_vH[nf_dir, mo, :] # eigenvector for mode mo # noqa: N806
# Dmo = D_vH[mo, 0,:] # eigenvalue for mode mo
# To avoid nan when calculating VDmo
- Dmo = D_vH[mo, 0, :] + 1j * 0
+ Dmo = D_vH[mo, 0, :] + 1j * 0 # noqa: N806
thetmo = theta_vH[nf_dir, mo, :] # theta for mode mo
- VDmo = (
+ VDmo = ( # noqa: N806
np.sqrt((V_H / vRef) ** 3)
* np.abs(Vmo)
* (np.ones((ncomp, 1)) * np.sqrt(Dmo))
@@ -916,34 +916,34 @@ def simulation_gaussian(
# Loop over floors
# g_jm = np.zeros((N_t, ncomp),dtype = 'complex_')
- F_jm = np.zeros((ncomp, N_t))
+ F_jm = np.zeros((ncomp, N_t)) # noqa: N806
coef = np.sqrt(2) * np.sqrt(f_inc) * np.exp(1j * varth)
coef2 = np.exp(1j * ((mo + 1) / l_mo * f_inc) * tvec)
- fVDmo = interp1d(f, VDmo, kind='linear', fill_value='extrapolate')
+ fVDmo = interp1d(f, VDmo, kind='linear', fill_value='extrapolate') # noqa: N806
fthetmo = interp1d(f, thetmo, kind='linear', fill_value='extrapolate')
- fV_interp = np.abs(fVDmo(f_tmp))
+ fV_interp = np.abs(fVDmo(f_tmp)) # noqa: N806
fthet_interp = np.exp((1j) * (fthetmo(f_tmp)))
for j in range(ncomp):
# l denotes a particular freq. point
# m denotes a particular mode
# j denotes a particular floor
- fVDmo = interp1d(f, VDmo[j, :], kind='linear', fill_value='extrapolate')
+ fVDmo = interp1d(f, VDmo[j, :], kind='linear', fill_value='extrapolate') # noqa: N806
fthetmo = interp1d(
f, thetmo[j, :], kind='linear', fill_value='extrapolate'
)
- B_jm = np.zeros((N_t,), dtype='complex_')
+ B_jm = np.zeros((N_t,), dtype='complex_') # noqa: N806
B_jm[0:N_f] = coef * fV_interp[j, :] * fthet_interp[j, :]
g_jm = np.fft.ifft(B_jm) * N_t
F_jm[j, :] = np.real(g_jm * coef2)
- # TODO it is hard to tell whether they are similar or not
+ # TODO it is hard to tell whether they are similar or not # noqa: TD002, TD004
# sum up F from different modes (zero - mean)
- F_jzm = F_jzm + F_jm
+ F_jzm = F_jzm + F_jm # noqa: N806
return F_jzm
@@ -952,16 +952,16 @@ def simulation_gaussian(
if __name__ == '__main__':
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816, F405
# set filenames
- aimName = sys.argv[2]
- evtName = sys.argv[4]
+ aimName = sys.argv[2] # noqa: N816, F405
+ evtName = sys.argv[4] # noqa: N816, F405
- getRV = False
- for myarg in sys.argv:
- if (myarg == '--getRV') or (myarg == 'getRV'):
- getRV = True
+ getRV = False # noqa: N816
+ for myarg in sys.argv: # noqa: F405
+ if (myarg == '--getRV') or (myarg == 'getRV'): # noqa: PLR1714
+ getRV = True # noqa: N816
if error_tag and getRV:
err_exit(
@@ -975,7 +975,7 @@ def simulation_gaussian(
try:
main(aimName, evtName, getRV)
- except Exception as err:
+ except Exception as err: # noqa: BLE001
import traceback
if getRV:
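
A minimal standalone sketch of the spectral-simulation step above: random phases are attached to the first N_f frequency bins of an assumed one-sided PSD, and an inverse FFT scaled by N_t yields a zero-mean Gaussian sample path, matching the coef/B_jm/ifft pattern in simulation_gaussian. All names and values here are illustrative, not taken from the module.

import numpy as np

rng = np.random.default_rng(42)
N_t, N_f, f_inc = 2048, 512, 0.05            # time steps, freq. points, df
S = np.exp(-np.linspace(0.0, 5.0, N_f))      # assumed one-sided PSD values
varth = 2.0 * np.pi * rng.random(N_f)        # random phase angles
B = np.zeros(N_t, dtype=complex)
B[:N_f] = np.sqrt(2.0) * np.sqrt(f_inc * S) * np.exp(1j * varth)
x = np.real(np.fft.ifft(B) * N_t)            # zero-mean Gaussian record
print(round(x.mean(), 6), round(x.std(), 3))
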
diff --git a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
index ea1f7e74c..96f3359b8 100644
--- a/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
+++ b/modules/createEVENT/groundMotionIM/IntensityMeasureComputer.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -50,10 +50,10 @@
from scipy.interpolate import interp1d
from scipy.stats.mstats import gmean
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import *
+from simcenter_common import * # noqa: E402, F403
IM_TYPES = [
'PeakGroundResponse',
@@ -71,8 +71,8 @@
}
-class IntensityMeasureComputer:
- def __init__(self, time_hist_dict=dict(), units=dict(), ampScaled=False):
+class IntensityMeasureComputer: # noqa: D101
+ def __init__(self, time_hist_dict=dict(), units=dict(), ampScaled=False): # noqa: FBT002, B006, C408, ARG002, N803
self.time_hist_dict = time_hist_dict
self.units = units
self._define_constants()
@@ -82,7 +82,7 @@ def __init__(self, time_hist_dict=dict(), units=dict(), ampScaled=False):
from_acc_unit = units.get('acceleration')
else:
from_acc_unit = '{}/{}^2'.format(units['length'], units['time'])
- for cur_hist_name, cur_hist in self.time_hist_dict.items():
+ for cur_hist_name, cur_hist in self.time_hist_dict.items(): # noqa: B007, PERF102
cur_hist[2] = self.convert_accel_units(
cur_hist[2], from_acc_unit
).tolist()
@@ -110,23 +110,23 @@ def _define_constants(self):
def _init_intensity_measures(self):
# response spectra
- self.periods = dict()
- self.disp_spectrum = dict()
- self.vel_spectrum = dict()
- self.acc_spectrum = dict()
- self.psv = dict()
- self.psa = dict()
+ self.periods = dict() # noqa: C408
+ self.disp_spectrum = dict() # noqa: C408
+ self.vel_spectrum = dict() # noqa: C408
+ self.acc_spectrum = dict() # noqa: C408
+ self.psv = dict() # noqa: C408
+ self.psa = dict() # noqa: C408
# peak ground responses
- self.pga = dict()
- self.pgv = dict()
- self.pgd = dict()
+ self.pga = dict() # noqa: C408
+ self.pgv = dict() # noqa: C408
+ self.pgd = dict() # noqa: C408
# arias intensity
- self.i_a = dict()
+ self.i_a = dict() # noqa: C408
# significant duration
- self.ds575 = dict()
- self.ds595 = dict()
+ self.ds575 = dict() # noqa: C408
+ self.ds595 = dict() # noqa: C408
# saratio
- self.saratio = dict()
+ self.saratio = dict() # noqa: C408
# all
self.intensity_measures = {
@@ -162,8 +162,8 @@ def _init_intensity_measures(self):
'SaRatio': 'scalar',
}
- def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'):
- """Converts acceleration from/to different units"""
+ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'): # noqa: C901, PLR0911, PLR0912
+ """Converts acceleration from/to different units""" # noqa: D400, D401
acceleration = np.asarray(acceleration)
if from_ == 'g':
if to_ == 'g':
@@ -309,9 +309,9 @@ def convert_accel_units(self, acceleration, from_, to_='cm/sec/sec'):
if to_ in self.mile_sec_square:
return acceleration
- raise ValueError(f'Unrecognized unit {from_}')
+ raise ValueError(f'Unrecognized unit {from_}') # noqa: EM102, TRY003
- def compute_response_spectrum(self, periods=[], damping=0.05, im_units=dict()):
+ def compute_response_spectrum(self, periods=[], damping=0.05, im_units=dict()): # noqa: B006, C408, D102
if len(im_units) == 0:
unit_factor_vspec = 1.0
unit_factor_aspec = 1.0
@@ -341,7 +341,7 @@ def compute_response_spectrum(self, periods=[], damping=0.05, im_units=dict()):
# psa is in g, psv in cm/sec
if len(periods) == 0:
return
- elif type(periods) == list:
+ elif type(periods) == list: # noqa: RET505, E721
periods = np.array(periods)
num_periods = len(periods)
@@ -434,7 +434,7 @@ def compute_response_spectrum(self, periods=[], damping=0.05, im_units=dict()):
)
self.periods.update({cur_hist_name: periods.tolist()})
- def compute_peak_ground_responses(self, im_units=dict()):
+ def compute_peak_ground_responses(self, im_units=dict()): # noqa: B006, C408, D102
if len(im_units) == 0:
unit_factor_pga = 1.0
unit_factor_pgv = 1.0
@@ -455,7 +455,7 @@ def compute_peak_ground_responses(self, im_units=dict()):
for cur_hist_name, cur_hist in self.time_hist_dict.items():
dt = cur_hist[1]
ground_acc = cur_hist[2]
- num_steps = len(ground_acc)
+ num_steps = len(ground_acc) # noqa: F841
# integral
velocity = dt * cumtrapz(ground_acc, initial=0.0)
displacement = dt * cumtrapz(velocity, initial=0.0)
@@ -476,7 +476,7 @@ def compute_peak_ground_responses(self, im_units=dict()):
{cur_hist_name: np.max(np.fabs(displacement)) * unit_factor_pgd}
)
- def compute_arias_intensity(self, im_units=dict()):
+ def compute_arias_intensity(self, im_units=dict()): # noqa: B006, C408, D102
if len(im_units) == 0:
unit_factor_ai = 1.0
unit_factor_ds575 = 1.0
@@ -496,10 +496,10 @@ def compute_arias_intensity(self, im_units=dict()):
for cur_hist_name, cur_hist in self.time_hist_dict.items():
dt = cur_hist[1]
ground_acc = cur_hist[2]
- num_steps = len(ground_acc)
+ num_steps = len(ground_acc) # noqa: F841
tmp = [x**2 / 100 / 100 for x in ground_acc]
# integral
- I_A = np.pi / 2 / self.g * dt * cumtrapz(tmp, initial=0.0)
+ I_A = np.pi / 2 / self.g * dt * cumtrapz(tmp, initial=0.0) # noqa: N806
# collect data
self.i_a.update({cur_hist_name: np.max(np.fabs(I_A)) * unit_factor_ai})
# compute significant duration
@@ -507,23 +507,23 @@ def compute_arias_intensity(self, im_units=dict()):
self.ds575.update({cur_hist_name: ds575 * unit_factor_ds575})
self.ds595.update({cur_hist_name: ds595 * unit_factor_ds595})
- def _compute_significant_duration(self, I_A, dt):
+ def _compute_significant_duration(self, I_A, dt): # noqa: N803
# note this function returns durations in sec
ds575 = 0.0
ds595 = 0.0
# normalize
- I_A_n = I_A / np.max(I_A)
+ I_A_n = I_A / np.max(I_A) # noqa: N806
# find 5%, 75%, 95%
- id5 = next(x for x, val in enumerate(I_A_n) if val > 0.05)
- id75 = next(x for x, val in enumerate(I_A_n) if val > 0.75)
- id95 = next(x for x, val in enumerate(I_A_n) if val > 0.95)
+ id5 = next(x for x, val in enumerate(I_A_n) if val > 0.05) # noqa: PLR2004
+ id75 = next(x for x, val in enumerate(I_A_n) if val > 0.75) # noqa: PLR2004
+ id95 = next(x for x, val in enumerate(I_A_n) if val > 0.95) # noqa: PLR2004
# compute ds
ds575 = dt * (id75 - id5)
ds595 = dt * (id95 - id5)
# return
return ds575, ds595
- def compute_saratio(self, T1=1.0, Ta=0.02, Tb=3.0, im_units=dict()):
+ def compute_saratio(self, T1=1.0, Ta=0.02, Tb=3.0, im_units=dict()): # noqa: B006, C408, N803, D102
if len(self.psa) == 0:
return
@@ -538,7 +538,7 @@ def compute_saratio(self, T1=1.0, Ta=0.02, Tb=3.0, im_units=dict()):
period_list = [0.01 * x for x in range(1500)]
period_list = [x for x in period_list if x <= Tb and x >= Ta]
- for cur_hist_name, cur_hist in self.time_hist_dict.items():
+ for cur_hist_name, cur_hist in self.time_hist_dict.items(): # noqa: B007, PERF102
cur_psa = self.psa.get(cur_hist_name, None)
cur_periods = self.periods.get(cur_hist_name, None)
if (cur_psa is None) or (cur_periods is None):
@@ -551,34 +551,34 @@ def compute_saratio(self, T1=1.0, Ta=0.02, Tb=3.0, im_units=dict()):
)
-def load_records(event_file, ampScaled):
+def load_records(event_file, ampScaled): # noqa: N803, D103
event_data = event_file.get('Events', None)
if event_data is None:
- raise ValueError(
- "IntensityMeasureComputer: 'Events' attribute is not found in EVENT.json"
+ raise ValueError( # noqa: TRY003
+ "IntensityMeasureComputer: 'Events' attribute is not found in EVENT.json" # noqa: EM101
)
- else:
+ else: # noqa: RET506
event_data = event_data[0]
# check type
if (event_data.get('type', None) != 'Seismic') and (
event_data.get('type', None) != 'timeHistory'
):
- return dict()
+ return dict() # noqa: C408
# get time series attribute
time_series = event_data.get('timeSeries', None)
if time_series is None:
- return dict()
+ return dict() # noqa: C408
ts_names = [x['name'] for x in time_series]
# collect time series tags
pattern = event_data.get('pattern', None)
if pattern is None:
- raise ValueError(
- "IntensityMeasureComputer: 'pattern' is not found in EVENT.json"
+ raise ValueError( # noqa: TRY003
+ "IntensityMeasureComputer: 'pattern' is not found in EVENT.json" # noqa: EM101
)
- dict_ts = dict()
+ dict_ts = dict() # noqa: C408
for cur_pat in pattern:
dict_ts.update({cur_pat['timeSeries']: [cur_pat['dof']]})
@@ -586,15 +586,15 @@ def load_records(event_file, ampScaled):
for cur_ts in list(dict_ts.keys()):
try:
cur_id = ts_names.index(cur_ts)
- except:
- raise ValueError(
- f"IntensityMeasureComputer: {cur_ts} is not found in 'timeSeries' in EVENT.json"
+ except: # noqa: E722
+ raise ValueError( # noqa: B904, TRY003
+ f"IntensityMeasureComputer: {cur_ts} is not found in 'timeSeries' in EVENT.json" # noqa: EM102
)
# get amplitude scaling (if the record is raw, i.e., ampScaled is false)
if not ampScaled:
- scalingFactor = time_series[cur_id].get('factor', 1.0)
+ scalingFactor = time_series[cur_id].get('factor', 1.0) # noqa: N806
else:
- scalingFactor = 1.0
+ scalingFactor = 1.0 # noqa: N806
# append that record
dict_ts[cur_ts].append(time_series[cur_id]['dT'])
dict_ts[cur_ts].append(
@@ -605,45 +605,45 @@ def load_records(event_file, ampScaled):
return dict_ts
-def get_unit_factor(unit_in, unit_out):
+def get_unit_factor(unit_in, unit_out): # noqa: D103
# this function is geared to the unit names in SimCenterUnitsCombo in R2D.
unit_factor = 1.0
# unit types
unit_types = globals().get('unit_types')
f_out = 1
f_in = 1
- for cur_unit, name_list in unit_types.items():
+ for cur_unit, name_list in unit_types.items(): # noqa: B007, PERF102
if unit_out in name_list:
f_out = globals().get(unit_out)
if unit_in in name_list:
f_in = globals().get(unit_in)
unit_factor = f_in / f_out
- return unit_factor
+ return unit_factor # noqa: RET504
-def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
+def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean): # noqa: C901, N803, D103
# load AIM file
try:
- with open(AIM_file, encoding='utf-8') as f:
- AIM_file = json.load(f)
- except:
- raise ValueError(
- f'IntensityMeasureComputer: cannot load AIM file {AIM_file}'
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
+ AIM_file = json.load(f) # noqa: N806
+ except: # noqa: E722
+ raise ValueError( # noqa: B904, TRY003
+ f'IntensityMeasureComputer: cannot load AIM file {AIM_file}' # noqa: EM102
)
# load EVENT file
try:
- with open(EVENT_file, encoding='utf-8') as f:
+ with open(EVENT_file, encoding='utf-8') as f: # noqa: PTH123
event_file = json.load(f)
- except:
- raise ValueError(
- f'IntensityMeasureComputer: cannot load EVENT file {EVENT_file}'
+ except: # noqa: E722
+ raise ValueError( # noqa: B904, TRY003
+ f'IntensityMeasureComputer: cannot load EVENT file {EVENT_file}' # noqa: EM102
)
# get periods
- AIM_event = AIM_file['Events']
- if type(AIM_event) == list:
- AIM_event = AIM_event[0]
+ AIM_event = AIM_file['Events'] # noqa: N806
+ if type(AIM_event) == list: # noqa: E721
+ AIM_event = AIM_event[0] # noqa: N806
periods = AIM_event.get(
'SpectrumPeriod',
[
@@ -673,8 +673,8 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
# corresponding to records after SimCenterEvent.py
units = AIM_file['GeneralInformation'].get('units', None)
if units is None:
- raise ValueError(
- f'IntensityMeasureComputer: units is not found in {AIM_file}'
+ raise ValueError( # noqa: TRY003
+ f'IntensityMeasureComputer: units is not found in {AIM_file}' # noqa: EM102
)
else:
# corresponding to raw records (e.g., EE-UQ)
@@ -682,14 +682,14 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
# get IM list (will be user-defined)
im_types = [] # IM type
- im_units = dict()
+ im_units = dict() # noqa: C408
im_names = ['Periods'] # IM name
- AIM_im = AIM_file.get('IntensityMeasure', None)
+ AIM_im = AIM_file.get('IntensityMeasure', None) # noqa: N806
output_periods = []
process_geomean = False
if AIM_im is None:
# search it again under UQ/surrogateMethodInfo
- AIM_im = AIM_file['UQ']['surrogateMethodInfo'].get('IntensityMeasure', None)
+ AIM_im = AIM_file['UQ']['surrogateMethodInfo'].get('IntensityMeasure', None) # noqa: N806
if geoMean:
process_geomean = AIM_file['UQ']['surrogateMethodInfo'].get(
'useGeoMean', False
@@ -701,7 +701,7 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
if AIM_im is None or len(AIM_im) == 0:
# no intensity measure calculation requested
return
- else:
+ else: # noqa: RET505
for cur_im in list(AIM_im.keys()):
for ref_type in IM_TYPES:
if cur_im in IM_MAP.get(ref_type):
@@ -737,16 +737,16 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
output_periods = periods
if cur_im == 'SaRatio':
tmp = AIM_im[cur_im].get('Periods', [0.02, 1.0, 3.0])
- Ta, Tb = [np.min(tmp), np.max(tmp)]
+ Ta, Tb = [np.min(tmp), np.max(tmp)] # noqa: N806
tmp.pop(tmp.index(Ta))
tmp.pop(tmp.index(Tb))
- T1 = tmp[0]
+ T1 = tmp[0] # noqa: N806
periods = [
Ta + 0.01 * (x - 1)
for x in range(int(np.ceil((Tb - Ta) / 0.01)) + 3)
]
break
- for Ti in output_periods:
+ for Ti in output_periods: # noqa: N806
if Ti not in periods:
bisect.insort(periods, Ti)
@@ -780,24 +780,24 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
# save an IM.json
out_data = {'IntensityMeasure': im_computer.intensity_measures}
- with open(IM_file, 'w', encoding='utf-8') as f:
+ with open(IM_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(out_data, f, indent=2)
# save a csv file
- csv_dict = dict()
+ csv_dict = dict() # noqa: C408
colname = []
for cur_im in im_types:
colname = colname + IM_MAP.get(cur_im, [])
im_dict = im_computer.intensity_measures
for cur_hist_name, cur_hist in dict_time_series.items():
- cur_colname = []
+ cur_colname = [] # noqa: F841
cur_dof = cur_hist[0]
cur_periods = im_dict['Periods'].get(cur_hist_name)
for cur_im in im_names:
if cur_im in IM_MAP.get('PseudoSpectrum'):
if len(output_periods) > 0:
- for Ti in output_periods:
- cur_im_T = f'{cur_im}({Ti}s)'
+ for Ti in output_periods: # noqa: N806
+ cur_im_T = f'{cur_im}({Ti}s)' # noqa: N806
tmp_key = f'1-{cur_im_T}-0-{cur_dof}'
if len(cur_periods) > 1:
# interp
@@ -849,7 +849,7 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
]
get_count_dict[new_key_name] += 1
- for key, val in geo_csv_dict.items():
+ for key, val in geo_csv_dict.items(): # noqa: B007
geo_csv_dict[key] = [
a ** (1 / get_count_dict[key]) for a in geo_csv_dict[key]
]
@@ -860,9 +860,9 @@ def main(AIM_file, EVENT_file, IM_file, unitScaled, ampScaled, geoMean):
csv_df = pd.DataFrame.from_dict(csv_dict)
tmp_idx = IM_file.index('.')
if tmp_idx:
- filenameCSV = IM_file[:tmp_idx] + '.csv'
+ filenameCSV = IM_file[:tmp_idx] + '.csv' # noqa: N806
else:
- filenameCSV = IM_file + '.csv'
+ filenameCSV = IM_file + '.csv' # noqa: N806
csv_df.to_csv(filenameCSV, index=False)
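
For reference, the Arias-intensity and significant-duration logic in compute_arias_intensity and _compute_significant_duration reduces to the sketch below, here with a synthetic record in SI units (the module itself imports the older scipy name cumtrapz for cumulative_trapezoid):

import numpy as np
from scipy.integrate import cumulative_trapezoid

g, dt = 9.81, 0.01                              # m/s^2, s
t = np.arange(0.0, 10.0, dt)
acc = 0.3 * g * np.sin(2.0 * np.pi * 2.0 * t) * np.exp(-0.3 * t)  # m/s^2
I_A = np.pi / (2.0 * g) * cumulative_trapezoid(acc**2, dx=dt, initial=0.0)
husid = I_A / I_A[-1]                           # normalized Husid curve
id5, id75, id95 = (np.argmax(husid > p) for p in (0.05, 0.75, 0.95))
print(f'AI={I_A[-1]:.3f} m/s  DS5-75={dt * (id75 - id5):.2f} s  '
      f'DS5-95={dt * (id95 - id5):.2f} s')
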
diff --git a/modules/createEVENT/hazardBasedEvent/HazardBasedEvent.py b/modules/createEVENT/hazardBasedEvent/HazardBasedEvent.py
index 60c06f534..ce2d5b0dc 100644
--- a/modules/createEVENT/hazardBasedEvent/HazardBasedEvent.py
+++ b/modules/createEVENT/hazardBasedEvent/HazardBasedEvent.py
@@ -1,4 +1,4 @@
-# This python script process the input and will use it to run SHA and ground motion selection
+# This Python script processes the input and uses it to run SHA and ground motion selection # noqa: INP001, D100
# In addition to providing the event file
import glob
@@ -9,15 +9,15 @@
import sys
-def computeScenario(gmConfig, location):
- scriptDir = os.path.dirname(os.path.realpath(__file__))
- eqHazardPath = f'{scriptDir}/GMU/EQHazard.jar'
- simulateIMPath = f'{scriptDir}/GMU/SimulateIM'
- selectRecordPath = f'{scriptDir}/GMU/SelectRecord'
- recordDatabasePath = f'{scriptDir}/GMU/NGAWest2-1000.csv'
+def computeScenario(gmConfig, location): # noqa: N802, N803, D103
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
+ eqHazardPath = f'{scriptDir}/GMU/EQHazard.jar' # noqa: N806
+ simulateIMPath = f'{scriptDir}/GMU/SimulateIM' # noqa: N806
+ selectRecordPath = f'{scriptDir}/GMU/SelectRecord' # noqa: N806
+ recordDatabasePath = f'{scriptDir}/GMU/NGAWest2-1000.csv' # noqa: N806
# Separate Selection Config
- selectionConfig = gmConfig['RecordSelection']
+ selectionConfig = gmConfig['RecordSelection'] # noqa: N806
del gmConfig['RecordSelection']
gmConfig['Site'] = {}
@@ -28,20 +28,20 @@ def computeScenario(gmConfig, location):
# Adding the required output
gmConfig['IntensityMeasure']['EnableJsonOutput'] = True
- with open(
+ with open( # noqa: PTH123
'./HazardWorkDir/Hazard_Scenario.json', 'w', encoding='utf-8'
- ) as hazardFile:
+ ) as hazardFile: # noqa: N806
json.dump(gmConfig, hazardFile, indent=4)
# Now we need to run the EQHazard Process
- hazardCommand = [
+ hazardCommand = [ # noqa: N806
'java',
'-jar',
eqHazardPath,
'./HazardWorkDir/Hazard_Scenario.json',
'./HazardWorkDir/Hazard_Output.json',
]
- hazardResult = subprocess.call(hazardCommand)
+ hazardResult = subprocess.call(hazardCommand) # noqa: S603, N806
if hazardResult != 0:
sys.stderr.write('Hazard analysis failed!')
@@ -49,22 +49,22 @@ def computeScenario(gmConfig, location):
# Now we need to run the SimulateIM Process
# First we create a simulation config
- simConfig = {
+ simConfig = { # noqa: N806
'GroundMotions': {'File': './HazardWorkDir/Hazard_Output.json'},
'NumSimulations': 1,
'SpatialCorrelation': True,
}
- with open(
+ with open( # noqa: PTH123
'./HazardWorkDir/Sim_Config.json', 'w', encoding='utf-8'
- ) as simConfigFile:
+ ) as simConfigFile: # noqa: N806
json.dump(simConfig, simConfigFile, indent=4)
- simulateCommand = [
+ simulateCommand = [ # noqa: N806
simulateIMPath,
'./HazardWorkDir/Sim_Config.json',
'./HazardWorkDir/Hazard_Sim.json',
]
- simResult = subprocess.call(simulateCommand)
+ simResult = subprocess.call(simulateCommand) # noqa: S603, N806
if simResult != 0:
sys.stderr.write('Intensity measure simulation failed!')
@@ -74,27 +74,27 @@ def computeScenario(gmConfig, location):
#
selectionConfig['Target']['File'] = './HazardWorkDir/Hazard_Sim.json'
selectionConfig['Database']['File'] = recordDatabasePath
- with open(
+ with open( # noqa: PTH123
'./HazardWorkDir/Selection_Config.json', 'w', encoding='utf-8'
- ) as selectionConfigFile:
+ ) as selectionConfigFile: # noqa: N806
json.dump(selectionConfig, selectionConfigFile, indent=4)
- selectionCommand = [
+ selectionCommand = [ # noqa: N806
selectRecordPath,
'./HazardWorkDir/Selection_Config.json',
'./HazardWorkDir/Records_Selection.json',
]
- simResult = subprocess.call(selectionCommand)
+ simResult = subprocess.call(selectionCommand) # noqa: S603, N806
- if simResult != 0:
+ if simResult != 0: # noqa: RET503
sys.stderr.write('Record selection failed!')
return -2
-def readNGAWest2File(ngaW2FilePath, scaleFactor):
+def readNGAWest2File(ngaW2FilePath, scaleFactor): # noqa: N802, N803, D103
series = []
dt = 0.0
- with open(ngaW2FilePath) as recordFile:
- canRead = False # We need to process the header first
+ with open(ngaW2FilePath) as recordFile: # noqa: PTH123, N806
+ canRead = False # We need to process the header first # noqa: N806
for line in recordFile:
if canRead:
series.extend(
@@ -105,42 +105,42 @@ def readNGAWest2File(ngaW2FilePath, scaleFactor):
dt = float(
re.match(r'NPTS=.+, DT=\s+([0-9\.]+)\s+SEC', line).group(1)
)
- canRead = True
+ canRead = True # noqa: N806
return series, dt
-def createNGAWest2Event(rsn, scaleFactor, recordsFolder, eventFilePath):
- pattern = os.path.join(recordsFolder, 'RSN') + str(rsn) + '_*.AT2'
- recordFiles = glob.glob(pattern)
- if len(recordFiles) != 2:
- print(
+def createNGAWest2Event(rsn, scaleFactor, recordsFolder, eventFilePath): # noqa: N802, N803, D103
+ pattern = os.path.join(recordsFolder, 'RSN') + str(rsn) + '_*.AT2' # noqa: PTH118
+ recordFiles = glob.glob(pattern) # noqa: PTH207, N806
+ if len(recordFiles) != 2: # noqa: PLR2004
+ print( # noqa: T201
'Error finding NGA West 2 files.\n'
f'Please download the files for record {rsn} '
f'from NGA West 2 website and place them in the records folder ({recordsFolder})'
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
h1, dt1 = readNGAWest2File(recordFiles[0], scaleFactor)
h2, dt2 = readNGAWest2File(recordFiles[1], scaleFactor)
- patternH1 = {}
+ patternH1 = {} # noqa: N806
patternH1['type'] = 'UniformAcceleration'
patternH1['timeSeries'] = 'accel_X'
patternH1['dof'] = 1
- patternH2 = {}
+ patternH2 = {} # noqa: N806
patternH2['type'] = 'UniformAcceleration'
patternH2['timeSeries'] = 'accel_Y'
patternH2['dof'] = 2
- seriesH1 = {}
+ seriesH1 = {} # noqa: N806
seriesH1['name'] = 'accel_X'
seriesH1['type'] = 'Value'
seriesH1['dT'] = dt1
seriesH1['data'] = h1
- seriesH2 = {}
+ seriesH2 = {} # noqa: N806
seriesH2['name'] = 'accel_Y'
seriesH2['type'] = 'Value'
seriesH2['dT'] = dt2
@@ -161,52 +161,52 @@ def createNGAWest2Event(rsn, scaleFactor, recordsFolder, eventFilePath):
event['pattern'] = [patternH1, patternH2]
event['units'] = {'length': 'm', 'time': 'sec'}
- eventsDict = {}
+ eventsDict = {} # noqa: N806
eventsDict['Events'] = [event]
eventsDict['RandomVariables'] = []
- with open(eventFilePath, 'w', encoding='utf-8') as eventFile:
+ with open(eventFilePath, 'w', encoding='utf-8') as eventFile: # noqa: PTH123, N806
json.dump(eventsDict, eventFile, indent=4)
-def main():
- inputArgs = sys.argv
+def main(): # noqa: D103
+ inputArgs = sys.argv # noqa: N806
# Process only if --getRV is passed
if '--getRV' not in inputArgs:
sys.exit(0)
# First let's process the arguments
- argBIM = inputArgs.index('--filenameAIM') + 1
- bimFilePath = inputArgs[argBIM]
- argEVENT = inputArgs.index('--filenameEVENT') + 1
- eventFilePath = inputArgs[argEVENT]
+ argBIM = inputArgs.index('--filenameAIM') + 1 # noqa: N806
+ bimFilePath = inputArgs[argBIM] # noqa: N806
+ argEVENT = inputArgs.index('--filenameEVENT') + 1 # noqa: N806
+ eventFilePath = inputArgs[argEVENT] # noqa: N806
# Ensure a hazard cache folder exists
- if not os.path.exists('./HazardWorkDir'):
- os.mkdir('./HazardWorkDir')
+ if not os.path.exists('./HazardWorkDir'): # noqa: PTH110
+ os.mkdir('./HazardWorkDir') # noqa: PTH102
- with open(bimFilePath, encoding='utf-8') as bimFile:
+ with open(bimFilePath, encoding='utf-8') as bimFile: # noqa: PTH123, N806
bim = json.load(bimFile)
location = [
bim['GeneralInformation']['location']['latitude'],
bim['GeneralInformation']['location']['longitude'],
]
- scriptDir = os.path.dirname(os.path.realpath(__file__))
- recordsFolder = f'{scriptDir}/GMU/NGAWest2Records'
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
+ recordsFolder = f'{scriptDir}/GMU/NGAWest2Records' # noqa: N806
computeScenario(bim['Events'][0]['GroundMotion'], location)
# We need to read the building location
# Now we can start processing the event
- with open('./HazardWorkDir/Records_Selection.json') as selectionFile:
- recordSelection = json.load(selectionFile)
+ with open('./HazardWorkDir/Records_Selection.json') as selectionFile: # noqa: PTH123, N806
+ recordSelection = json.load(selectionFile) # noqa: N806
- selectedRecord = recordSelection['GroundMotions'][0]
+ selectedRecord = recordSelection['GroundMotions'][0] # noqa: N806
rsn = selectedRecord['Record']['Id']
- scaleFactor = selectedRecord['ScaleFactor']
+ scaleFactor = selectedRecord['ScaleFactor'] # noqa: N806
createNGAWest2Event(rsn, scaleFactor, recordsFolder, eventFilePath)
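
The AT2 header handling in readNGAWest2File boils down to the pattern below: the 'NPTS=..., DT=... SEC' line flips a flag, after which every line is a run of whitespace-separated accelerations multiplied by the scale factor. The two-line record here is fabricated for illustration, not a real NGA West 2 file.

import io
import re

record = io.StringIO(
    'NPTS=    8, DT=   .0050 SEC\n'
    '  .001  .002  .003  .004\n'
    ' -.001 -.002 -.003 -.004\n'
)
scale_factor, series, dt, can_read = 2.0, [], 0.0, False
for line in record:
    if can_read:
        series.extend(float(v) * scale_factor for v in line.split())
    elif line.upper().startswith('NPTS='):
        dt = float(re.match(r'NPTS=.+, DT=\s+([0-9\.]+)\s+SEC', line).group(1))
        can_read = True
print(dt, series)
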
diff --git a/modules/createEVENT/pointWindSpeed/parseHurricaneScenario.py b/modules/createEVENT/pointWindSpeed/parseHurricaneScenario.py
index 760a5190d..a771b3b5c 100755
--- a/modules/createEVENT/pointWindSpeed/parseHurricaneScenario.py
+++ b/modules/createEVENT/pointWindSpeed/parseHurricaneScenario.py
@@ -1,4 +1,4 @@
-# python code to open the TPU .mat file
+# python code to open the TPU .mat file # noqa: EXE002, INP001, D100
# and put data into a SimCenter JSON file for
# wind tunnel data
@@ -7,35 +7,35 @@
import scipy.io as sio
-inputArgs = sys.argv
+inputArgs = sys.argv # noqa: N816
-print('Number of arguments: %d' % len(sys.argv))
-print('The arguments are: %s' % str(sys.argv))
+print('Number of arguments: %d' % len(sys.argv)) # noqa: T201
+print('The arguments are: %s' % str(sys.argv)) # noqa: T201, UP031
# set filenames
-matFileIN = sys.argv[1]
-jsonFileOUT = sys.argv[2]
+matFileIN = sys.argv[1] # noqa: N816
+jsonFileOUT = sys.argv[2] # noqa: N816
-dataDir = os.getcwd()
-scriptDir = os.path.dirname(os.path.realpath(__file__))
+dataDir = os.getcwd() # noqa: PTH109, N816
+scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
-def parseMatFile(matFileIn, windFileOutName):
- file = open(windFileOutName, 'w')
+def parseMatFile(matFileIn, windFileOutName): # noqa: N802, N803, D103
+ file = open(windFileOutName, 'w') # noqa: SIM115, PTH123
mat_contents = sio.loadmat(matFileIn)
- print(mat_contents['wind'])
- windData = mat_contents['wind'][0][0]
+ print(mat_contents['wind']) # noqa: T201
+ windData = mat_contents['wind'][0][0] # noqa: N806
f = windData[0]
lat = windData[1]
long = windData[2]
- numLocations = lat.shape[0]
- print(lat.shape)
+ numLocations = lat.shape[0] # noqa: N806
+ print(lat.shape) # noqa: T201
file.write('{')
file.write('"wind":[')
for i in range(numLocations):
- locSpeed = f[i]
- locLat = lat[i]
- locLong = long[i]
+ locSpeed = f[i] # noqa: N806
+ locLat = lat[i] # noqa: N806
+ locLong = long[i] # noqa: N806
if i == numLocations - 1:
file.write(
diff --git a/modules/createEVENT/siteResponse/Gauss1D.py b/modules/createEVENT/siteResponse/Gauss1D.py
index 50c594ff1..2e48cffac 100644
--- a/modules/createEVENT/siteResponse/Gauss1D.py
+++ b/modules/createEVENT/siteResponse/Gauss1D.py
@@ -1,10 +1,10 @@
-from cmath import exp, pi, sqrt
+from cmath import exp, pi, sqrt # noqa: INP001, D100
import numpy as np
-class gauss1D:
- def __init__(self, Ly, Ny, sigma=1.0, d=1.0):
+class gauss1D: # noqa: D101
+ def __init__(self, Ly, Ny, sigma=1.0, d=1.0): # noqa: N803
# overall length in x-direction
self.Lx = 1
# overall length in y-direction
@@ -31,7 +31,7 @@ def __init__(self, Ly, Ny, sigma=1.0, d=1.0):
self.kxu = self.Nx * self.dkx
self.kyu = self.Ny * self.dky
- def calculate(self):
+ def calculate(self): # noqa: D102
# matrix of random phase angles
phi = 2 * pi * np.random.rand(self.Mx, self.My)
psi = 2 * pi * np.random.rand(self.Mx, self.My)
@@ -50,14 +50,14 @@ def calculate(self):
for ll in range(self.My):
kyl = ll * self.dky
kappa = sqrt(kxk**2 + kyl**2)
- Sgg = (
+ Sgg = ( # noqa: N806
self.sigma**2
* self.d**2
* exp(-(self.d**2) * abs(kappa) ** 2 / 4.0)
/ 4.0
/ pi
)
- Akl = sqrt(2 * Sgg * self.dkx * self.dky)
+ Akl = sqrt(2 * Sgg * self.dkx * self.dky) # noqa: N806
f2[ll] = Akl * exp(1j * phi[kk, ll]) * exp(1j * kyl * yq)
f2sum = np.sum(f2)
part1[kk] = np.real(sqrt(2) * np.sum(f2sum * f1[kk]))
@@ -68,14 +68,14 @@ def calculate(self):
for ll in range(self.My):
kyl = ll * self.dky
kappa = sqrt(kxk**2 + kyl**2)
- Sgg = (
+ Sgg = ( # noqa: N806
self.sigma**2
* self.d**2
* exp(-(self.d**2) * abs(kappa) ** 2 / 4.0)
/ 4.0
/ pi
)
- Akl = sqrt(2 * Sgg * self.dkx * self.dky)
+ Akl = sqrt(2 * Sgg * self.dkx * self.dky) # noqa: N806
f4[ll] = Akl * exp(1j * psi[kk, ll]) * exp(-1j * kyl * yq)
f4sum = np.sum(f4)
part2[kk] = np.real(sqrt(2) * np.sum(f4sum * f3[kk]))
@@ -83,8 +83,8 @@ def calculate(self):
self.f[pp, qq] = part1.sum() + part2.sum()
-def printField(self):
- print(self.f)
+def printField(self): # noqa: N802, D103
+ print(self.f) # noqa: T201
if __name__ == '__main__':
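
A short usage sketch for the gauss1D class above, mirroring how calibration.py consumes it; the thickness and wavelength values are illustrative, and Gauss1D.py is assumed to be importable:

import numpy as np
from Gauss1D import gauss1D  # assumed to be on sys.path

thickness, wave_length = 10.0, 0.5
Ny = thickness / wave_length          # wave-number increments in y
rd = gauss1D(thickness, Ny)           # sigma=1.0, d=1.0 defaults
rd.calculate()
field = np.squeeze(rd.f.reshape((-1, 1)))
print(field.shape, float(field.mean()))
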
diff --git a/modules/createEVENT/siteResponse/RegionalSiteResponse.py b/modules/createEVENT/siteResponse/RegionalSiteResponse.py
index ee6f1e91c..dd4f7179b 100644
--- a/modules/createEVENT/siteResponse/RegionalSiteResponse.py
+++ b/modules/createEVENT/siteResponse/RegionalSiteResponse.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -52,23 +52,23 @@
import numpy as np
from scipy import integrate
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import *
+from simcenter_common import * # noqa: E402, F403
-convert_EDP = {'max_abs_acceleration': 'PGA'}
+convert_EDP = {'max_abs_acceleration': 'PGA'} # noqa: N816
-gravityG = 9.81 # m/s2
+gravityG = 9.81 # m/s2 # noqa: N816
# default element size before wave length check
-elementSize = 0.5 # m
+elementSize = 0.5 # m # noqa: N816
# site class B, m/s
VsRock = 760
-plotFlag = False
+plotFlag = False # noqa: N816
-def get_scale_factors(input_units, output_units):
- """Determine the scale factor to convert input event to internal event data"""
+def get_scale_factors(input_units, output_units): # noqa: C901
+ """Determine the scale factor to convert input event to internal event data""" # noqa: D400
# special case: if the input unit is not specified then do not do any scaling
if input_units is None:
scale_factors = {'ALL': 1.0}
@@ -80,13 +80,13 @@ def get_scale_factors(input_units, output_units):
unit_length = output_units.get('length', 'inch')
f_length = globals().get(unit_length, None)
if f_length is None:
- raise ValueError(f'Specified length unit not recognized: {unit_length}')
+ raise ValueError(f'Specified length unit not recognized: {unit_length}') # noqa: EM102, TRY003
# if no time unit is specified, 'sec' is assumed
unit_time = output_units.get('time', 'sec')
f_time = globals().get(unit_time, None)
if f_time is None:
- raise ValueError(f'Specified time unit not recognized: {unit_time}')
+ raise ValueError(f'Specified time unit not recognized: {unit_time}') # noqa: EM102, TRY003
scale_factors = {}
@@ -99,8 +99,8 @@ def get_scale_factors(input_units, output_units):
# get the scale factor to standard units
f_in = globals().get(input_unit, None)
if f_in is None:
- raise ValueError(
- f'Input unit for event files not recognized: {input_unit}'
+ raise ValueError( # noqa: TRY003
+ f'Input unit for event files not recognized: {input_unit}' # noqa: EM102
)
unit_type = None
@@ -109,7 +109,7 @@ def get_scale_factors(input_units, output_units):
unit_type = base_unit_type
if unit_type is None:
- raise ValueError(f'Failed to identify unit type: {input_unit}')
+ raise ValueError(f'Failed to identify unit type: {input_unit}') # noqa: EM102, TRY003
# the output unit depends on the unit type
if unit_type == 'acceleration':
@@ -122,8 +122,8 @@ def get_scale_factors(input_units, output_units):
f_out = 1.0 / f_length
else:
- raise ValueError(
- f'Unexpected unit type in workflow: {unit_type}'
+ raise ValueError( # noqa: TRY003
+ f'Unexpected unit type in workflow: {unit_type}' # noqa: EM102
)
# the scale factor is the product of input and output scaling
@@ -134,7 +134,7 @@ def get_scale_factors(input_units, output_units):
return scale_factors
-def postProcess(evtName, input_units, f_scale_units):
+def postProcess(evtName, input_units, f_scale_units): # noqa: N802, N803, D103
# if f_scale_units is None
if None in [input_units, f_scale_units]:
f_scale = 1.0
@@ -157,28 +157,28 @@ def postProcess(evtName, input_units, f_scale_units):
# acc_surf = acc[:,-2] / 9.81
# KZ, 03/07/2022: removed the unit conversion here (done in createGM4BIM)
acc_surf = acc[:, -3]
- dT = time[1] - time[0]
+ dT = time[1] - time[0] # noqa: N806
- timeSeries = dict(
+ timeSeries = dict( # noqa: C408, N806
name='accel_X',
type='Value',
dT=dT,
data=[x * f_scale for x in acc_surf.tolist()],
)
- patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1)
+ patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1) # noqa: C408
# KZ, 01/17/2022: I added global y direction
# KZ, 03/07/2022: removed the unit conversion here (done in createGM4BIM)
acc_surf_y = acc[:, -1]
- timeSeries_y = dict(
+ timeSeries_y = dict( # noqa: C408, N806
name='accel_Y',
type='Value',
dT=dT,
data=[y * f_scale for y in acc_surf_y.tolist()],
)
- patterns_y = dict(type='UniformAcceleration', timeSeries='accel_Y', dof=2)
+ patterns_y = dict(type='UniformAcceleration', timeSeries='accel_Y', dof=2) # noqa: C408
# KZ, 01/17/2022: I updated this section accordingly
"""
@@ -193,7 +193,7 @@ def postProcess(evtName, input_units, f_scale_units):
pattern = [patterns]
)
"""
- evts = dict(
+ evts = dict( # noqa: C408
RandomVariables=[],
name='SiteResponseTool',
type='Seismic',
@@ -204,35 +204,35 @@ def postProcess(evtName, input_units, f_scale_units):
pattern=[patterns, patterns_y],
)
- dataToWrite = dict(Events=[evts])
+ dataToWrite = dict(Events=[evts]) # noqa: C408, N806
- with open(evtName, 'w') as outfile:
+ with open(evtName, 'w') as outfile: # noqa: PTH123
json.dump(dataToWrite, outfile, indent=4)
- print('DONE postProcess')
+ print('DONE postProcess') # noqa: T201
return 0
-def run_opensees(
- BIM_file,
- EVENT_file,
+def run_opensees( # noqa: D103
+ BIM_file, # noqa: N803
+ EVENT_file, # noqa: N803
event_path,
model_script,
model_script_path,
ndm,
- getRV,
+ getRV, # noqa: N803
):
- sys.path.insert(0, os.getcwd())
+ sys.path.insert(0, os.getcwd()) # noqa: PTH109
- print('**************** run_opensees ****************')
+ print('**************** run_opensees ****************') # noqa: T201
# load the model builder script
- with open(BIM_file) as f:
- BIM_in = json.load(f)
+ with open(BIM_file) as f: # noqa: PTH123
+ BIM_in = json.load(f) # noqa: N806
model_params = BIM_in['GeneralInformation']
- model_units = BIM_in['GeneralInformation']['units']
- location = BIM_in['GeneralInformation']['location']
+ model_units = BIM_in['GeneralInformation']['units'] # noqa: F841
+ location = BIM_in['GeneralInformation']['location'] # noqa: F841
# convert units if necessary
# KZ, 01/17/2022: Vs30 and DepthToRock are not subject to the model_units for now...
@@ -253,14 +253,14 @@ def run_opensees(
else:
get_records(BIM_file, EVENT_file, event_path)
# load the event file
- with open(EVENT_file) as f:
- EVENT_in_All = json.load(f)
- EVENT_in = EVENT_in_All['Events'][0]
+ with open(EVENT_file) as f: # noqa: PTH123
+ EVENT_in_All = json.load(f) # noqa: N806
+ EVENT_in = EVENT_in_All['Events'][0] # noqa: N806
event_list = EVENT_in['timeSeries']
- pattern_list = EVENT_in['pattern']
+ pattern_list = EVENT_in['pattern'] # noqa: F841
- fileNames = ['xInput', 'yInput']
+ fileNames = ['xInput', 'yInput'] # noqa: N806
# define the time series
for evt_i, event in enumerate(event_list):
acc = event['data']
@@ -276,13 +276,13 @@ def run_opensees(
# run the analysis
shutil.copyfile(
- os.path.join(model_script_path, model_script),
- os.path.join(os.getcwd(), model_script),
+ os.path.join(model_script_path, model_script), # noqa: PTH118
+ os.path.join(os.getcwd(), model_script), # noqa: PTH109, PTH118
)
build_model(model_params, int(ndm) - 1)
- subprocess.Popen('OpenSees ' + model_script, shell=True).wait()
+ subprocess.Popen('OpenSees ' + model_script, shell=True).wait() # noqa: S602
# FMK
# update Event file with acceleration recorded at surface
@@ -310,11 +310,11 @@ def run_opensees(
postProcess('fmkEVENT', input_units, f_scale_units)
-def get_records(BIM_file, EVENT_file, data_dir):
- with open(BIM_file) as f:
+def get_records(BIM_file, EVENT_file, data_dir): # noqa: N803, D103
+ with open(BIM_file) as f: # noqa: PTH123
bim_file = json.load(f)
- with open(EVENT_file) as f:
+ with open(EVENT_file) as f: # noqa: PTH123
event_file = json.load(f)
event_id = event_file['Events'][0]['event_id']
@@ -324,7 +324,7 @@ def get_records(BIM_file, EVENT_file, data_dir):
event_data = np.array(bim_file['Events']['Events']).T
event_loc = np.where(event_data == event_id)[0][1]
f_scale_user = float(event_data.T[event_loc][1])
- except:
+ except: # noqa: E722
f_scale_user = 1.0
# FMK scale_factor = dict([(evt['fileName'], evt.get('factor',1.0)) for evt in bim_file["Events"]["Events"]])[event_id]
@@ -333,14 +333,14 @@ def get_records(BIM_file, EVENT_file, data_dir):
event_file['Events'][0].update(load_record(event_id, data_dir, scale_factor))
- with open(EVENT_file, 'w') as f:
+ with open(EVENT_file, 'w') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
-def write_RV(BIM_file, EVENT_file, data_dir):
+def write_RV(BIM_file, EVENT_file, data_dir): # noqa: N802, N803, D103
# Copied from SimCenterEvent; writes the names of the motions
- with open(BIM_file) as f:
+ with open(BIM_file) as f: # noqa: PTH123
bim_data = json.load(f)
event_file = {'randomVariables': [], 'Events': []}
@@ -368,7 +368,7 @@ def write_RV(BIM_file, EVENT_file, data_dir):
}
)
- RV_elements = np.array(events).T[0].tolist()
+ RV_elements = np.array(events).T[0].tolist() # noqa: N806
# RV_elements = []
# for event in events:
# if event['EventClassification'] == 'Earthquake':
@@ -397,16 +397,16 @@ def write_RV(BIM_file, EVENT_file, data_dir):
load_record(events[0][0], data_dir, empty=len(events) > 1)
)
- with open(EVENT_file, 'w') as f:
+ with open(EVENT_file, 'w') as f: # noqa: PTH123
json.dump(event_file, f, indent=2)
-def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
+def load_record(fileName, data_dir, scale_factor=1.0, empty=False): # noqa: FBT002, N803, D103
# Copied from SimCenterEvent; writes the motion data into the Event
- fileName = fileName.split('x')[0]
+ fileName = fileName.split('x')[0] # noqa: N806
- with open(posixpath.join(data_dir, f'{fileName}.json')) as f:
+ with open(posixpath.join(data_dir, f'{fileName}.json')) as f: # noqa: PTH123
event_data = json.load(f)
event_dic = {
@@ -421,7 +421,7 @@ def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
for i, (src_label, tar_label) in enumerate(
zip(['data_x', 'data_y'], ['accel_X', 'accel_Y'])
):
- if src_label in event_data.keys():
+ if src_label in event_data.keys(): # noqa: SIM118
event_dic['timeSeries'].append(
{
'name': tar_label,
@@ -441,34 +441,34 @@ def load_record(fileName, data_dir, scale_factor=1.0, empty=False):
return event_dic
-def build_model(model_params, numEvt):
+def build_model(model_params, numEvt): # noqa: N803, D103
try:
- depthToRock = model_params['DepthToRock']
- except:
- depthToRock = 0
- Vs30 = model_params['Vs30']
+ depthToRock = model_params['DepthToRock'] # noqa: N806
+ except: # noqa: E722
+ depthToRock = 0 # noqa: N806
+ Vs30 = model_params['Vs30'] # noqa: N806
# Vs30 model
- thickness, Vs = SVM(Vs30, depthToRock, VsRock, elementSize)
+ thickness, Vs = SVM(Vs30, depthToRock, VsRock, elementSize) # noqa: N806
- numElems = len(Vs)
+ numElems = len(Vs) # noqa: N806
# Config model
- f = open('freefield_config.tcl', 'w')
+ f = open('freefield_config.tcl', 'w') # noqa: SIM115, PTH123
f.write('# site response configuration file\n')
f.write(f'set soilThick {thickness:.1f}\n')
f.write(f'set numLayers {numElems:d}\n')
f.write('# layer thickness - bottom to top\n')
- eleVsize = thickness / numElems
- travelTime = 0
+ eleVsize = thickness / numElems # noqa: N806
+ travelTime = 0 # noqa: N806
for ii in range(numElems):
f.write(f'set layerThick({ii + 1:d}) {eleVsize:.2f}\n')
f.write(f'set nElemY({ii + 1:d}) 1\n')
f.write(f'set sElemY({ii + 1:d}) {eleVsize:.3f}\n')
- travelTime += eleVsize / Vs[ii]
+ travelTime += eleVsize / Vs[ii] # noqa: N806
# time averaged shear wave velocity
- averageVs = thickness / travelTime
- naturalFrequency = averageVs / 4 / thickness # Vs/4H
+ averageVs = thickness / travelTime # noqa: N806
+ naturalFrequency = averageVs / 4 / thickness # Vs/4H # noqa: N806
f.write(f'set nElemT {numElems:d}\n')
f.write('# motion file (used if the input arguments do not include motion)\n')
@@ -491,17 +491,17 @@ def build_model(model_params, numEvt):
f.close()
# Create Material
- f = open('freefield_material.tcl', 'w')
+ f = open('freefield_material.tcl', 'w') # noqa: SIM115, PTH123
if model_params['Model'] in 'BA':
# Borja and Amies 1994 J2 model
- rhoSoil = model_params['Den']
+ rhoSoil = model_params['Den'] # noqa: N806
poisson = 0.3
sig_v = rhoSoil * gravityG * eleVsize * 0.5
for ii in range(numElems):
f.write(f'set rho({ii + 1:d}) {rhoSoil:.1f}\n')
- shearG = rhoSoil * Vs[ii] * Vs[ii]
- bulkK = shearG * 2.0 * (1 + poisson) / 3.0 / (1.0 - 2.0 * poisson)
+ shearG = rhoSoil * Vs[ii] * Vs[ii] # noqa: N806
+ bulkK = shearG * 2.0 * (1 + poisson) / 3.0 / (1.0 - 2.0 * poisson) # noqa: N806
f.write(f'set shearG({ii + 1:d}) {shearG:.2f}\n')
f.write(f'set bulkK({ii + 1:d}) {bulkK:.2f}\n')
f.write(
@@ -521,13 +521,13 @@ def build_model(model_params, numEvt):
)
elif model_params['Model'] in 'PIMY':
# PIMY model
- rhoSoil = model_params['Den']
+ rhoSoil = model_params['Den'] # noqa: N806
poisson = 0.3
sig_v = rhoSoil * gravityG * eleVsize * 0.5
for ii in range(numElems):
f.write(f'set rho({numElems - ii:d}) {rhoSoil:.1f}\n')
- shearG = rhoSoil * Vs[ii] * Vs[ii]
- bulkK = shearG * 2.0 * (1 + poisson) / 3.0 / (1.0 - 2.0 * poisson)
+ shearG = rhoSoil * Vs[ii] * Vs[ii] # noqa: N806
+ bulkK = shearG * 2.0 * (1 + poisson) / 3.0 / (1.0 - 2.0 * poisson) # noqa: N806
f.write(f'set Vs({numElems - ii:d}) {Vs[ii]:.2f}\n')
f.write(f'set shearG({numElems - ii:d}) {shearG:.2f}\n')
f.write(f'set bulkK({numElems - ii:d}) {bulkK:.2f}\n')
@@ -553,7 +553,7 @@ def build_model(model_params, numEvt):
f'set mat({numElems - ii:d}) "PressureIndependMultiYield {numElems - ii:d} 3 $rho({numElems - ii:d}) $shearG({numElems - ii:d}) $bulkK({numElems - ii:d}) $su({numElems - ii:d}) 0.1 0.0 2116.0 0.0 31"\n\n\n'
)
else:
- rhoSoil = model_params['Den']
+ rhoSoil = model_params['Den'] # noqa: N806
poisson = 0.3
for ii in range(numElems):
f.write(f'set rho({ii + 1:d}) {rhoSoil:.1f}\n')
@@ -569,15 +569,15 @@ def build_model(model_params, numEvt):
f.close()
-def SVM(Vs30, depthToRock, VsRock, elementSize):
+def SVM(Vs30, depthToRock, VsRock, elementSize): # noqa: N802, N803, D103
# Sediment Velocity Model (SVM)
# Developed by Jian Shi and Domniki Asimaki (2018)
# Generates a shear velocity profile from Vs30 for shallow crust profiles
# Valid for 173.1 m/s < Vs30 < 1000 m/s
# Check Vs30
- if Vs30 < 173.1 or Vs30 > 1000:
- print(f'Caution: Vs30 {Vs30} is not within the valid range of the SVM! \n')
+ if Vs30 < 173.1 or Vs30 > 1000: # noqa: PLR2004
+ print(f'Caution: Vs30 {Vs30} is not within the valid range of the SVM! \n') # noqa: T201
# Parameters specific to: California
z_star = 2.5 # [m] depth considered to have constant Vs
@@ -593,13 +593,13 @@ def SVM(Vs30, depthToRock, VsRock, elementSize):
s4 = -7.6187e-3
# SVM Parameters f(Vs30)
- Vs0 = p1 * (Vs30**2) + p2 * Vs30 + p3
+ Vs0 = p1 * (Vs30**2) + p2 * Vs30 + p3 # noqa: N806
k = np.exp(r1 * (Vs30**r2) + r3)
n = s1 * np.exp(s2 * Vs30) + s3 * np.exp(s4 * Vs30)
# Check element size for max. frequency
- maxFrequency = 50 # Hz
- waveLength = Vs0 / maxFrequency
+ maxFrequency = 50 # Hz # noqa: N806
+ waveLength = Vs0 / maxFrequency # noqa: N806
# Need four elements per wavelength
if 4.0 * elementSize <= waveLength:
step_size = elementSize
@@ -612,7 +612,7 @@ def SVM(Vs30, depthToRock, VsRock, elementSize):
) # discretize depth to bedrock
# Vs Profile
- Vs = np.zeros(len(z))
+ Vs = np.zeros(len(z)) # noqa: N806
Vs[0] = Vs0
for ii in range(1, len(z)):
if z[ii] <= z_star:
@@ -622,9 +622,9 @@ def SVM(Vs30, depthToRock, VsRock, elementSize):
if depthToRock > 0:
thickness = depthToRock
- Vs_cropped = Vs[np.where(z <= depthToRock)]
+ Vs_cropped = Vs[np.where(z <= depthToRock)] # noqa: N806
else:
- Vs_cropped = Vs[np.where(Vs <= VsRock)]
+ Vs_cropped = Vs[np.where(Vs <= VsRock)] # noqa: N806
thickness = z[len(Vs_cropped) - 1] + 0.5 * step_size
if plotFlag:
@@ -633,7 +633,7 @@ def SVM(Vs30, depthToRock, VsRock, elementSize):
fig = plt.figure()
plt.plot(Vs, z, label='Vs profile')
plt.plot(Vs_cropped, z[0 : len(Vs_cropped)], label='Vs profile to bedrock')
- plt.grid(True)
+ plt.grid(True) # noqa: FBT003
ax = plt.gca()
ax.invert_yaxis()
plt.legend()
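
The element-size check in SVM is simple enough to verify by hand: with a 50 Hz target frequency and at least four elements per shortest wavelength, the default 0.5 m element passes whenever Vs0 >= 4 x 0.5 x 50 = 100 m/s. A sketch of that check; the refinement branch is not shown in the hunk above, so shrinking the step to a quarter wavelength is an assumption here:

max_frequency = 50.0                 # Hz
element_size = 0.5                   # m, default before the check
Vs0 = 250.0                          # m/s, assumed surface velocity
wave_length = Vs0 / max_frequency    # 5.0 m
if 4.0 * element_size <= wave_length:
    step_size = element_size
else:
    step_size = wave_length / 4.0    # assumption: refine to lambda/4
print(step_size)
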
diff --git a/modules/createEVENT/siteResponse/SiteResponse.py b/modules/createEVENT/siteResponse/SiteResponse.py
index b245c0386..aa024171c 100644
--- a/modules/createEVENT/siteResponse/SiteResponse.py
+++ b/modules/createEVENT/siteResponse/SiteResponse.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import subprocess
import sys
@@ -6,14 +6,14 @@
from postProcess import postProcess
-def main(args):
+def main(args): # noqa: D103
# set filenames
- srtName = args[1]
- evtName = args[3]
+ srtName = args[1] # noqa: N806
+ evtName = args[3] # noqa: N806
- RFflag = False
+ RFflag = False # noqa: N806
- with open(srtName, encoding='utf-8') as json_file:
+ with open(srtName, encoding='utf-8') as json_file: # noqa: PTH123
data = json.load(json_file)
for material in data['Events'][0]['materials']:
@@ -22,15 +22,15 @@ def main(args):
or material['type'] == 'PDMY03_Random'
or material['type'] == 'Elastic_Random'
):
- RFflag = True
+ RFflag = True # noqa: N806
break
if RFflag:
# create material file based on 1D Gaussian field
- soilData = data['Events'][0]
+ soilData = data['Events'][0] # noqa: N806
createMaterial(soilData)
# Run OpenSees
- subprocess.Popen('OpenSees model.tcl', shell=True).wait()
+ subprocess.Popen('OpenSees model.tcl', shell=True).wait() # noqa: S602, S607
# Run postprocessor to create EVENT.json
postProcess(evtName)
diff --git a/modules/createEVENT/siteResponse/calibration.py b/modules/createEVENT/siteResponse/calibration.py
index a3526c52f..0dde81d38 100644
--- a/modules/createEVENT/siteResponse/calibration.py
+++ b/modules/createEVENT/siteResponse/calibration.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import sys
import numpy as np
@@ -7,7 +7,7 @@
from scipy.interpolate import interp1d
-def materialPM4(baseInputs, matTag, fn):
+def materialPM4(baseInputs, matTag, fn): # noqa: N802, N803, D103
fn.write(
'nDMaterial PM4Sand {} {:.3f} {:.2f} {:.3f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} \n'.format(
matTag,
@@ -39,7 +39,7 @@ def materialPM4(baseInputs, matTag, fn):
)
-def materialPDMY03(baseInputs, matTag, fn):
+def materialPDMY03(baseInputs, matTag, fn): # noqa: N802, N803, D103
fn.write(
'nDMaterial PressureDependMultiYield03 {} {} {:.2f} {:.3e} {:.3e} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {} {:.3f} {:.3f} {:.3f} {:.3f} \n'.format(
matTag,
@@ -70,7 +70,7 @@ def materialPDMY03(baseInputs, matTag, fn):
)
-def materialElastic(baseInputs, matTag, fn):
+def materialElastic(baseInputs, matTag, fn): # noqa: N802, N803, D103
fn.write(
'nDMaterial ElasticIsotropic {} {:.3e} {:.3f} {:.2f} \n'.format(
matTag, baseInputs['E'], baseInputs['poisson'], baseInputs['density']
@@ -78,7 +78,7 @@ def materialElastic(baseInputs, matTag, fn):
)
-def calibration(variables, inputParameters, fn):
+def calibration(variables, inputParameters, fn): # noqa: C901, N803, D103
# This function has two parts: call gauss1D to generate a 1D random field, then generate material properties based on that field
# Currently only relative density is supported
# Calibration of PM4Sand is based on a parametric study that produces hpo = f(Dr, Go, CRR)
@@ -86,7 +86,7 @@ def calibration(variables, inputParameters, fn):
if variables['materialType'] == 'PM4Sand_Random':
# PM4Sand
- baseInputs = {
+ baseInputs = { # noqa: N806
'Dr': 0.65,
'Go': 600.0,
'hpo': 0.08,
@@ -114,7 +114,7 @@ def calibration(variables, inputParameters, fn):
}
elif variables['materialType'] == 'PDMY03_Random':
# PDMY03
- baseInputs = {
+ baseInputs = { # noqa: N806
'nd': 2,
'rho': 1.5,
'refShearModul': 4.69e4,
@@ -141,7 +141,7 @@ def calibration(variables, inputParameters, fn):
}
elif variables['materialType'] == 'Elastic_Random':
# Elastic
- baseInputs = {'E': 168480, 'poisson': 0.3, 'density': 2.0}
+ baseInputs = {'E': 168480, 'poisson': 0.3, 'density': 2.0} # noqa: N806
for keys in baseInputs:
baseInputs[keys] = inputParameters[keys]
@@ -149,40 +149,40 @@ def calibration(variables, inputParameters, fn):
# calculate random field
# size of mesh
thickness = variables['thickness']
- waveLength = variables['Ly']
+ waveLength = variables['Ly'] # noqa: N806
# Number of wave number increments in y-direction
- Ny = thickness / waveLength
+ Ny = thickness / waveLength # noqa: N806
rd = gauss1D(thickness, Ny)
rd.calculate()
- F = np.squeeze(rd.f.reshape((-1, 1)))
- Y = np.linspace(0, rd.Ly, rd.My)
+ F = np.squeeze(rd.f.reshape((-1, 1))) # noqa: N806
+ Y = np.linspace(0, rd.Ly, rd.My) # noqa: N806
f = interp1d(Y, F, kind='cubic')
# mapping from random field to mesh
- elemID = np.arange(variables['eleStart'], variables['eleEnd'] + 1, 1)
- elementY = np.linspace(
+ elemID = np.arange(variables['eleStart'], variables['eleEnd'] + 1, 1) # noqa: N806
+ elementY = np.linspace( # noqa: N806
variables['elevationStart'], variables['elevationEnd'], len(elemID)
)
- for matTag in elemID:
+ for matTag in elemID: # noqa: N806
residual = (
variables['mean']
* f(elementY[matTag - variables['eleStart']])
* variables['COV']
)
- print()
+ print() # noqa: T201
if variables['name'] == 'Dr':
# bound Dr between 0.2 and 0.95
- Dr = min(max(0.2, variables['mean'] + residual), 0.95)
- if Dr != Dr:
- Dr = 0.2
+ Dr = min(max(0.2, variables['mean'] + residual), 0.95) # noqa: N806
+ if Dr != Dr: # noqa: PLR0124
+ Dr = 0.2 # noqa: N806
if variables['materialType'] == 'PM4Sand_Random':
baseInputs['Dr'] = Dr
- Go = baseInputs['Go']
+ Go = baseInputs['Go'] # noqa: N806
# CPT and SPT Based Liquefaction Triggering Procedures (Boulanger and Idriss 2014)
- Cd = 46.0
- N160 = Dr**2 * Cd
- CRR_IB = np.exp(
+ Cd = 46.0 # noqa: N806
+ N160 = Dr**2 * Cd # noqa: N806
+ CRR_IB = np.exp( # noqa: N806
N160 / 14.1
+ (N160 / 126) ** 2
- (N160 / 23.6) ** 3
@@ -202,9 +202,9 @@ def calibration(variables, inputParameters, fn):
+ 0.71347 * Dr**2
)
hpo = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)
- if hpo != hpo:
+ if hpo != hpo: # noqa: PLR0124
hpo = 0.4
- CRR_prediction = (
+ CRR_prediction = ( # noqa: N806
0.114
- 0.44844 * Dr
- (4.2648e-5) * Go
@@ -222,13 +222,13 @@ def calibration(variables, inputParameters, fn):
baseInputs['hpo'] = hpo
materialPM4(baseInputs, matTag, fn)
elif variables['materialType'] == 'PDMY03_Random':
- Dr = max(min(Dr, 0.87), 0.33)
+ Dr = max(min(Dr, 0.87), 0.33) # noqa: N806
baseInputs['Dr'] = Dr
# interpolation using Khosravifar, A., Elgamal, A., Lu, J., and Li, J. [2018].
# "A 3D model for earthquake-induced liquefaction triggering and post-liquefaction response."
# Soil Dynamics and Earthquake Engineering, 110, 43-52
drs = np.array([0.33, 0.57, 0.74, 0.87])
- df = pd.DataFrame(
+ df = pd.DataFrame( # noqa: PD901
[
(
46900,
@@ -302,14 +302,14 @@ def calibration(variables, inputParameters, fn):
'dc',
),
)
- for columnName, columnData in df.iteritems():
- f_Dr = interp1d(drs, df[columnName], kind='cubic')
+ for columnName, columnData in df.iteritems(): # noqa: B007, N806
+ f_Dr = interp1d(drs, df[columnName], kind='cubic') # noqa: N806
baseInputs[columnName] = f_Dr(Dr)
materialPDMY03(baseInputs, matTag, fn)
elif variables['name'] == 'Vs':
if variables['materialType'] == 'Elastic_Random':
# bound Vs between 50 and 1500
- Vs = min(max(50, variables['mean'] + residual), 1500)
+ Vs = min(max(50, variables['mean'] + residual), 1500) # noqa: N806
baseInputs['E'] = (
2.0
* baseInputs['density']
@@ -321,28 +321,28 @@ def calibration(variables, inputParameters, fn):
materialElastic(baseInputs, matTag, fn)
-def createMaterial(data):
- eleStart = 0
- eleEnd = 0
- elevationStart = 0
- elevationEnd = 0
- numElems = 0
- totalHeight = 0
- randomMaterialList = ['PM4Sand_Random', 'PDMY03_Random', 'Elastic_Random']
- fn = open('material.tcl', 'w')
+def createMaterial(data): # noqa: N802, D103
+ eleStart = 0 # noqa: N806
+ eleEnd = 0 # noqa: N806
+ elevationStart = 0 # noqa: N806
+ elevationEnd = 0 # noqa: N806
+ numElems = 0 # noqa: N806
+ totalHeight = 0 # noqa: N806
+ randomMaterialList = ['PM4Sand_Random', 'PDMY03_Random', 'Elastic_Random'] # noqa: N806
+ fn = open('material.tcl', 'w') # noqa: SIM115, PTH123
for layer in reversed(data['soilProfile']['soilLayers']):
if layer['eSize'] != 0:
- eleStart = numElems + 1
- numElemsLayer = round(layer['thickness'] / layer['eSize'])
- numElems += numElemsLayer
- eleSize = layer['thickness'] / numElemsLayer
- elevationStart = eleSize / 2.0
- totalHeight += layer['thickness']
- eleEnd = numElems
- elevationEnd = layer['thickness'] - eleSize / 2.0
+ eleStart = numElems + 1 # noqa: N806
+ numElemsLayer = round(layer['thickness'] / layer['eSize']) # noqa: N806
+ numElems += numElemsLayer # noqa: N806
+ eleSize = layer['thickness'] / numElemsLayer # noqa: N806
+ elevationStart = eleSize / 2.0 # noqa: N806
+ totalHeight += layer['thickness'] # noqa: N806
+ eleEnd = numElems # noqa: N806
+ elevationEnd = layer['thickness'] - eleSize / 2.0 # noqa: N806
if data['materials'][layer['material'] - 1]['type'] in randomMaterialList:
- variables = dict(
+ variables = dict( # noqa: C408
materialType=data['materials'][layer['material'] - 1]['type'],
name=data['materials'][layer['material'] - 1]['Variable'],
mean=data['materials'][layer['material'] - 1]['mean'],
@@ -354,20 +354,20 @@ def createMaterial(data):
elevationStart=elevationStart, # location of first Gauss Point with respect to layer base
elevationEnd=elevationEnd, # location of last Gauss Point with respect to layer base
)
- inputParameters = data['materials'][layer['material'] - 1]
+ inputParameters = data['materials'][layer['material'] - 1] # noqa: N806
calibration(variables, inputParameters, fn)
fn.close()
if __name__ == '__main__':
- srtName = sys.argv[0]
+ srtName = sys.argv[0] # noqa: N816
# data obtained from user input
# define the random field
- with open(srtName) as json_file:
+ with open(srtName) as json_file: # noqa: PTH123
data = json.load(json_file)
- eventData = data['Events'][0]
+ eventData = data['Events'][0] # noqa: N816
createMaterial(eventData)
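
The field-to-mesh mapping inside calibration() follows a pattern worth isolating: interpolate the sampled field with a cubic spline, evaluate it at the element centroids, and bound the perturbed property. A self-contained sketch with a stand-in field instead of gauss1D (all values illustrative):

import numpy as np
from scipy.interpolate import interp1d

rng = np.random.default_rng(0)
Ly, My = 10.0, 64
Y = np.linspace(0.0, Ly, My)
F = np.cumsum(rng.standard_normal(My)) / np.sqrt(My)  # stand-in for gauss1D
f = interp1d(Y, F, kind='cubic')

mean_dr, cov = 0.65, 0.2
elem_y = np.linspace(0.25, 9.75, 20)                  # element centroids
dr = np.clip(mean_dr + mean_dr * f(elem_y) * cov, 0.2, 0.95)  # bounded Dr
print(dr.round(3))
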
diff --git a/modules/createEVENT/siteResponse/createGM4BIM.py b/modules/createEVENT/siteResponse/createGM4BIM.py
index bc86fdbee..fa376098c 100644
--- a/modules/createEVENT/siteResponse/createGM4BIM.py
+++ b/modules/createEVENT/siteResponse/createGM4BIM.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
#
# This file is part of the RDT Application.
@@ -45,58 +45,58 @@
import pandas as pd
-def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
- if not os.path.isdir(inputDir):
- print(f'input dir: {inputDir} does not exist')
+def createFilesForEventGrid(inputDir, outputDir, removeInputDir): # noqa: N802, N803, D103
+ if not os.path.isdir(inputDir): # noqa: PTH112
+ print(f'input dir: {inputDir} does not exist') # noqa: T201
return 0
- if not os.path.exists(outputDir):
- os.mkdir(outputDir)
+ if not os.path.exists(outputDir): # noqa: PTH110
+ os.mkdir(outputDir) # noqa: PTH102
- siteFiles = glob(f'{inputDir}/*BIM.json')
+ siteFiles = glob(f'{inputDir}/*BIM.json') # noqa: PTH207, N806
- GP_file = []
- Longitude = []
- Latitude = []
- id = []
+ GP_file = [] # noqa: N806, F841
+ Longitude = [] # noqa: N806
+ Latitude = [] # noqa: N806
+ id = [] # noqa: A001
sites = []
for site in siteFiles:
- with open(site) as f:
- All_json = json.load(f)
- generalInfo = All_json['GeneralInformation']
+ with open(site) as f: # noqa: PTH123
+ All_json = json.load(f) # noqa: N806
+ generalInfo = All_json['GeneralInformation'] # noqa: N806
Longitude.append(generalInfo['Longitude'])
Latitude.append(generalInfo['Latitude'])
- siteID = generalInfo['BIM_id']
+ siteID = generalInfo['BIM_id'] # noqa: N806
id.append(siteID)
- siteFileName = f'Site_{siteID}.csv'
+ siteFileName = f'Site_{siteID}.csv' # noqa: N806
sites.append(siteFileName)
- workdirs = glob(f'{inputDir}/{siteID}/workdir.*')
- siteEventFiles = []
- siteEventFactors = []
+ workdirs = glob(f'{inputDir}/{siteID}/workdir.*') # noqa: PTH207
+ siteEventFiles = [] # noqa: N806
+ siteEventFactors = [] # noqa: N806
for workdir in workdirs:
- head, sep, sampleID = workdir.partition('workdir.')
- print(sampleID)
+ head, sep, sampleID = workdir.partition('workdir.') # noqa: N806
+ print(sampleID) # noqa: T201
- eventName = f'Event_{siteID}_{sampleID}.json'
- print(eventName)
+ eventName = f'Event_{siteID}_{sampleID}.json' # noqa: N806
+ print(eventName) # noqa: T201
shutil.copy(f'{workdir}/fmkEVENT', f'{outputDir}/{eventName}')
siteEventFiles.append(eventName)
siteEventFactors.append(1)
- siteDF = pd.DataFrame(
+ siteDF = pd.DataFrame( # noqa: N806
list(zip(siteEventFiles, siteEventFactors)),
columns=['TH_file', 'factor'],
)
siteDF.to_csv(f'{outputDir}/{siteFileName}', index=False)
# create the EventFile
- gridDF = pd.DataFrame(
+ gridDF = pd.DataFrame( # noqa: N806
list(zip(sites, Longitude, Latitude)),
columns=['GP_file', 'Longitude', 'Latitude'],
)
@@ -113,7 +113,7 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Create ground motions for BIM.', allow_abbrev=False
)
@@ -128,8 +128,8 @@ def createFilesForEventGrid(inputDir, outputDir, removeInputDir):
workflowArgParser.add_argument('--removeInput', action='store_true')
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
- print(wfArgs)
+ print(wfArgs) # noqa: T201
# Calling the main function
createFilesForEventGrid(wfArgs.inputDir, wfArgs.outputDir, wfArgs.removeInput)
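
Note: the PTH and A001 suppressions in this hunk all have direct pathlib/naming equivalents. A hedged sketch of the same filesystem checks written the way the rules suggest (the function and variable names here are illustrative, not part of the module):

    from pathlib import Path

    def create_files_for_event_grid(input_dir, output_dir):
        """Illustrative pathlib version of the checks suppressed above."""
        in_path = Path(input_dir)
        if not in_path.is_dir():                  # replaces os.path.isdir (PTH112)
            return []
        out_path = Path(output_dir)
        if not out_path.exists():                 # replaces os.path.exists (PTH110)
            out_path.mkdir()                      # replaces os.mkdir (PTH102)
        site_files = sorted(in_path.glob('*BIM.json'))  # replaces glob.glob (PTH207)
        site_ids = [p.stem for p in site_files]   # avoids shadowing builtin id (A001)
        return list(zip(site_ids, site_files))
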
diff --git a/modules/createEVENT/siteResponse/postProcess.py b/modules/createEVENT/siteResponse/postProcess.py
index d16da6ff1..9313cfefd 100644
--- a/modules/createEVENT/siteResponse/postProcess.py
+++ b/modules/createEVENT/siteResponse/postProcess.py
@@ -1,24 +1,24 @@
-# This script create evt.j for workflow
+# This script creates evt.j for the workflow # noqa: INP001, D100
import json
import shutil
import numpy as np
-def postProcess(evtName):
+def postProcess(evtName): # noqa: N802, N803, D103
# acc = np.loadtxt("acceleration.out")
# os.remove("acceleration.out") # remove acceleration file to save space
acc = np.loadtxt('out_tcl/acceleration.out')
shutil.rmtree('out_tcl') # remove output files to save space
time = acc[:, 0]
acc_surf = acc[:, -2] / 9.81
- dT = time[1] - time[0]
+ dT = time[1] - time[0] # noqa: N806
- timeSeries = dict(name='accel_X', type='Value', dT=dT, data=acc_surf.tolist())
+ timeSeries = dict(name='accel_X', type='Value', dT=dT, data=acc_surf.tolist()) # noqa: C408, N806
- patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1)
+ patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1) # noqa: C408
- evts = dict(
+ evts = dict( # noqa: C408
RandomVariables=[],
name='SiteResponseTool',
type='Seismic',
@@ -29,9 +29,9 @@ def postProcess(evtName):
pattern=[patterns],
)
- dataToWrite = dict(Events=[evts])
+ dataToWrite = dict(Events=[evts]) # noqa: C408, N806
- with open(evtName, 'w') as outfile:
+ with open(evtName, 'w') as outfile: # noqa: PTH123
json.dump(dataToWrite, outfile, indent=4)
return 0
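
Note: the repeated C408 suppressions keep dict() calls; the literal form Ruff prefers builds an identical object, so either spelling of the event payload works:

    # dict() call (what C408 flags) and the literal Ruff prefers; the results are equal
    time_series = dict(name='accel_X', type='Value', dT=0.01, data=[0.0, 0.1])
    time_series_literal = {'name': 'accel_X', 'type': 'Value', 'dT': 0.01, 'data': [0.0, 0.1]}
    assert time_series == time_series_literal
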
diff --git a/modules/createEVENT/siteResponse/postProcessRegional.py b/modules/createEVENT/siteResponse/postProcessRegional.py
index 3a9fda0ae..75654fe3b 100644
--- a/modules/createEVENT/siteResponse/postProcessRegional.py
+++ b/modules/createEVENT/siteResponse/postProcessRegional.py
@@ -1,25 +1,25 @@
-# This script create evt.j for workflow
+# This script creates evt.j for the workflow # noqa: INP001, D100
import json
import os
import numpy as np
-def postProcess(evtName):
+def postProcess(evtName): # noqa: N802, N803, D103
acc = np.loadtxt('acceleration.out')
# remove acceleration file to save space
- os.remove('acceleration.out')
+ os.remove('acceleration.out') # noqa: PTH107
# acc = np.loadtxt("out_tcl/acceleration.out")
# shutil.rmtree("out_tcl") # remove output files to save space
time = acc[:, 0]
acc_surf = acc[:, -2] / 9.81
- dT = time[1] - time[0]
+ dT = time[1] - time[0] # noqa: N806
- timeSeries = dict(name='accel_X', type='Value', dT=dT, data=acc_surf.tolist())
+ timeSeries = dict(name='accel_X', type='Value', dT=dT, data=acc_surf.tolist()) # noqa: C408, N806
- patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1)
+ patterns = dict(type='UniformAcceleration', timeSeries='accel_X', dof=1) # noqa: C408
- evts = dict(
+ evts = dict( # noqa: C408
RandomVariables=[],
name='SiteResponseTool',
type='Seismic',
@@ -30,9 +30,9 @@ def postProcess(evtName):
pattern=[patterns],
)
- dataToWrite = dict(Events=[evts])
+ dataToWrite = dict(Events=[evts]) # noqa: C408, N806
- with open(evtName, 'w') as outfile:
+ with open(evtName, 'w') as outfile: # noqa: PTH123
json.dump(dataToWrite, outfile, indent=4)
return 0
diff --git a/modules/createEVENT/stochasticWave/Ex1_WaveKinematics.py b/modules/createEVENT/stochasticWave/Ex1_WaveKinematics.py
index 4271df8ae..3a2956287 100644
--- a/modules/createEVENT/stochasticWave/Ex1_WaveKinematics.py
+++ b/modules/createEVENT/stochasticWave/Ex1_WaveKinematics.py
@@ -1,8 +1,8 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001
"""Plot the wave kinematics (elevation, velocity, acceleration) for linear waves
Different locations, times and superposition of frequencies can be used.
-"""
+""" # noqa: D205
import matplotlib.pyplot as plt
import numpy as np
@@ -11,10 +11,10 @@
from welib.tools.figure import defaultRC
defaultRC()
-from welib.hydro.morison import *
-from welib.hydro.wavekin import *
-from welib.hydro.wavekin import elevation2d, kinematics2d, wavenumber
-from welib.tools.colors import python_colors
+from welib.hydro.morison import * # noqa: E402, F403
+from welib.hydro.wavekin import * # noqa: E402, F403
+from welib.hydro.wavekin import elevation2d, kinematics2d, wavenumber # noqa: E402
+from welib.tools.colors import python_colors # noqa: E402
fig, axes = plt.subplots(2, 2, sharey=False, figsize=(6.4, 4.8)) # (6.4,4.8)
fig.subplots_adjust(
@@ -65,8 +65,8 @@
vel, acc = kinematics2d(a, f, k, eps, h, time, z, x)
# eta = elevation2d(a, f, k, eps, time, x)
ax = axes[1, 0]
-sT = ['0', 'T/4', 'T/2', '3T/4']
-for it, t in enumerate(time[:-1]):
+sT = ['0', 'T/4', 'T/2', '3T/4'] # noqa: N816
+for it, t in enumerate(time[:-1]): # noqa: B007
ax.plot(
vel[:, it],
z,
@@ -74,7 +74,7 @@
c=python_colors(it),
label=f'vel, t={sT[it]}',
)
-for it, t in enumerate(time[:-1]):
+for it, t in enumerate(time[:-1]): # noqa: B007
ax.plot(
acc[:, it],
z,
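
Note: the E402/F403 suppressions exist because the welib imports must run after defaultRC() and use a star form; importing the names explicitly at the top of the module would clear both, along with the downstream F405 look-ups. A sketch, assuming inline_load lives in welib.hydro.morison as the star import implies:

    # Explicit imports make every name traceable, removing the F403/F405 suppressions
    from welib.hydro.morison import inline_load  # assumed home of inline_load
    from welib.hydro.wavekin import elevation2d, kinematics2d, wavenumber
    from welib.tools.colors import python_colors
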
diff --git a/modules/createEVENT/stochasticWave/Ex2_Jonswap_spectrum.py b/modules/createEVENT/stochasticWave/Ex2_Jonswap_spectrum.py
index 0de43421d..9cd26212d 100644
--- a/modules/createEVENT/stochasticWave/Ex2_Jonswap_spectrum.py
+++ b/modules/createEVENT/stochasticWave/Ex2_Jonswap_spectrum.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001
-"""Plot the JONSWAP spectrum for a given sea state"""
+"""Plot the JONSWAP spectrum for a given sea state""" # noqa: D400
import matplotlib.pyplot as plt
import numpy as np
@@ -9,9 +9,9 @@
from welib.tools.figure import defaultRC
defaultRC()
-from welib.hydro.morison import *
-from welib.hydro.spectra import jonswap
-from welib.hydro.wavekin import *
+from welib.hydro.morison import * # noqa: E402, F403
+from welib.hydro.spectra import jonswap # noqa: E402
+from welib.hydro.wavekin import * # noqa: E402, F403
# --- Parameters
t = np.arange(0, 3600.1, 1) # time vector [s]
@@ -20,8 +20,8 @@
Tp = 12.7 # Peak period [s]
# --- Derived parameters
-df = 1.0 / np.max(t) # Step size for frequency
-fMax = (1.0 / dt) / 2 # Highest frequency
+df = 1.0 / np.max(t) # Step size for frequency # noqa: PD901
+fMax = (1.0 / dt) / 2 # Highest frequency # noqa: N816
freq = np.arange(df, fMax + df / 2, df)
# --- Spectrum and amplitude
@@ -29,7 +29,7 @@
ap = np.sqrt(2 * S * df) # Wave amplitude [m]
# Find location of maximum energy
-iMax = np.argmax(S)
+iMax = np.argmax(S) # noqa: N816
# --- Plots
fig, ax = plt.subplots(1, 1, sharey=False, figsize=(6.4, 4.8)) # (6.4,4.8)
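
Note: the derived parameters follow the standard discrete-spectrum relations: the frequency step is the reciprocal of the record length, the highest usable frequency is the Nyquist limit 1/(2*dt), and component amplitudes come from the one-sided spectrum as a = sqrt(2*S*df). A self-contained sketch with a stand-in spectrum (the script itself uses welib's jonswap):

    import numpy as np

    t = np.arange(0, 3600.1, 1)                   # time vector [s]
    dt = t[1] - t[0]                              # timestep [s]
    df = 1.0 / np.max(t)                          # frequency step = 1 / record length
    f_max = 1.0 / (2.0 * dt)                      # Nyquist frequency
    freq = np.arange(df, f_max + df / 2, df)
    S = np.exp(-((freq - 1 / 12.7) ** 2) / 1e-4)  # stand-in spectrum, not jonswap()
    a = np.sqrt(2 * S * df)                       # component amplitudes [m]
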
diff --git a/modules/createEVENT/stochasticWave/Ex3_WaveTimeSeries.py b/modules/createEVENT/stochasticWave/Ex3_WaveTimeSeries.py
index 5d69505b6..d0455742b 100644
--- a/modules/createEVENT/stochasticWave/Ex3_WaveTimeSeries.py
+++ b/modules/createEVENT/stochasticWave/Ex3_WaveTimeSeries.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001
-"""Generate wave time series based on the Jonswap spectrum"""
+"""Generate wave time series based on the Jonswap spectrum""" # noqa: D400
import matplotlib.pyplot as plt
import numpy as np
@@ -10,11 +10,11 @@
from welib.tools.figure import defaultRC
defaultRC()
-from welib.hydro.morison import *
-from welib.hydro.spectra import jonswap
-from welib.hydro.wavekin import *
-from welib.hydro.wavekin import elevation2d, wavenumber
-from welib.tools.spectral import fft_wrap
+from welib.hydro.morison import * # noqa: E402, F403
+from welib.hydro.spectra import jonswap # noqa: E402
+from welib.hydro.wavekin import * # noqa: E402, F403
+from welib.hydro.wavekin import elevation2d, wavenumber # noqa: E402
+from welib.tools.spectral import fft_wrap # noqa: E402
# --- Random seed
seed(None)
@@ -29,8 +29,8 @@
# --- Jonswap spectrum
dt = t[1] - t[0] # timestep [s]
-df = 1 / np.max(t) # step size for frequency
-fHighCut = 1 / (dt) / 2.0 # Highest frequency in calculations
+df = 1 / np.max(t) # step size for frequency # noqa: PD901
+fHighCut = 1 / (dt) / 2.0 # Highest frequency in calculations # noqa: N816
freq = np.arange(df, fHighCut, df)
S = jonswap(freq, Hs, Tp=Tp, g=9.80665)
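
Note: given spectral amplitudes, the elevation record is a superposition of cosines with uniformly random phases, which is what elevation2d evaluates over many components. A self-contained sketch of that synthesis step (stand-in spectrum again; the seed is fixed only to make the sketch reproducible, unlike the script's seed(None)):

    import numpy as np

    t = np.arange(0.0, 600.0, 0.5)                # time vector [s]
    df = 1.0 / t[-1]
    freq = np.arange(df, 1.0, df)                 # frequency grid [Hz]
    S = np.exp(-((freq - 0.08) ** 2) / 1e-3)      # stand-in spectrum, not jonswap()
    a = np.sqrt(2 * S * df)                       # component amplitudes [m]
    rng = np.random.default_rng(0)
    eps = rng.uniform(0, 2 * np.pi, freq.shape)   # random phases
    eta = (a[:, None] * np.cos(2 * np.pi * freq[:, None] * t[None, :] + eps[:, None])).sum(axis=0)
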
diff --git a/modules/createEVENT/stochasticWave/Ex4_WaveLoads.py b/modules/createEVENT/stochasticWave/Ex4_WaveLoads.py
index 5d4ec3d31..691dc9672 100644
--- a/modules/createEVENT/stochasticWave/Ex4_WaveLoads.py
+++ b/modules/createEVENT/stochasticWave/Ex4_WaveLoads.py
@@ -1,6 +1,6 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001
-"""Compute inline/total hydrodynamic force and moments on a monopile using Morison's equation"""
+"""Compute inline/total hydrodynamic force and moments on a monopile using Morison's equation""" # noqa: D400
import argparse
from fractions import Fraction
@@ -13,9 +13,9 @@
from welib.tools.figure import defaultRC
defaultRC()
-from welib.hydro.morison import *
-from welib.hydro.wavekin import *
-from welib.tools.colors import python_colors
+from welib.hydro.morison import * # noqa: E402, F403
+from welib.hydro.wavekin import * # noqa: E402, F403
+from welib.tools.colors import python_colors # noqa: E402
# --- Parameters
g = 9.81 # gravity [m/s^2]
@@ -28,7 +28,7 @@
T = 12.0 # period [s]
eps = 0 # phase shift [rad]
f = 1.0 / T
-k = wavenumber(f, h, g)
+k = wavenumber(f, h, g) # noqa: F405
nz = 30 # number of points used in the z direction to compute loads
@@ -58,21 +58,21 @@
for it, t in enumerate(time[:-1]):
# Wave kinematics
- eta = elevation2d(a, f, k, eps, t, x=0)
+ eta = elevation2d(a, f, k, eps, t, x=0) # noqa: F405
z = np.linspace(-h, eta, nz)
- u, du = kinematics2d(a, f, k, eps, h, t, z, Wheeler=True, eta=eta)
- u0, du0 = kinematics2d(a, f, k, eps, h, t, z)
+ u, du = kinematics2d(a, f, k, eps, h, t, z, Wheeler=True, eta=eta) # noqa: F405
+ u0, du0 = kinematics2d(a, f, k, eps, h, t, z) # noqa: F405
# Wave loads with wheeler
- p_tot = inline_load(u, du, D, CD, CM, rho)
- p_inertia = inline_load(u, du, D, CD * 0, CM, rho)
- p_drag = inline_load(u, du, D, CD, CM * 0, rho)
- dM = p_tot * (z - z_ref) # [Nm/m]
+ p_tot = inline_load(u, du, D, CD, CM, rho) # noqa: F405
+ p_inertia = inline_load(u, du, D, CD * 0, CM, rho) # noqa: F405
+ p_drag = inline_load(u, du, D, CD, CM * 0, rho) # noqa: F405
+ dM = p_tot * (z - z_ref) # [Nm/m] # noqa: N816
# Wave loads without Wheeler
- p_tot0 = inline_load(u0, du0, D, CD, CM, rho)
- p_inertia0 = inline_load(u0, du0, D, CD * 0, CM, rho)
- p_drag0 = inline_load(u0, du0, D, CD, CM * 0, rho)
- dM0 = p_tot0 * (z - z_ref) # [Nm/m]
+ p_tot0 = inline_load(u0, du0, D, CD, CM, rho) # noqa: F405
+ p_inertia0 = inline_load(u0, du0, D, CD * 0, CM, rho) # noqa: F405
+ p_drag0 = inline_load(u0, du0, D, CD, CM * 0, rho) # noqa: F405
+ dM0 = p_tot0 * (z - z_ref) # [Nm/m] # noqa: N816
# Plot inline force
ax = axes1[int(it / 4), np.mod(it, 4)]
@@ -132,10 +132,10 @@
time = np.linspace(0, 60.0, 6001)
veta = np.zeros(time.shape)
-vF = np.zeros(time.shape)
-vM = np.zeros(time.shape)
-vF0 = np.zeros(time.shape)
-vM0 = np.zeros(time.shape)
+vF = np.zeros(time.shape) # noqa: N816
+vM = np.zeros(time.shape) # noqa: N816
+vF0 = np.zeros(time.shape) # noqa: N816
+vM0 = np.zeros(time.shape) # noqa: N816
XLIM = [-75, 75] # For inline force
XLIMM = [-2500, 2500] # For inline moment
@@ -148,18 +148,18 @@
for it, t in enumerate(time):
# Wave kinematics
- veta[it] = elevation2d(a, f, k, eps, t, x=0)
+ veta[it] = elevation2d(a, f, k, eps, t, x=0) # noqa: F405
z = np.linspace(-h, veta[it], nz)
- u, du = kinematics2d(a, f, k, eps, h, t, z, Wheeler=True, eta=veta[it])
- u0, du0 = kinematics2d(a, f, k, eps, h, t, z)
+ u, du = kinematics2d(a, f, k, eps, h, t, z, Wheeler=True, eta=veta[it]) # noqa: F405
+ u0, du0 = kinematics2d(a, f, k, eps, h, t, z) # noqa: F405
# Wave loads with Wheeler
- p_tot = inline_load(u, du, D, CD, CM, rho)
- vF[it] = np.trapz(p_tot, z) # [N]
- vM[it] = np.trapz(p_tot * (z - z_ref), z) # [Nm]
+ p_tot = inline_load(u, du, D, CD, CM, rho) # noqa: F405
+ vF[it] = np.trapz(p_tot, z) # [N] # noqa: NPY201
+ vM[it] = np.trapz(p_tot * (z - z_ref), z) # [Nm] # noqa: NPY201
# Wave loads without Wheeler
- p_tot0 = inline_load(u0, du0, D, CD, CM, rho)
- vF0[it] = np.trapz(p_tot0, z) # [N]
- vM0[it] = np.trapz(p_tot0 * (z - z_ref), z) # [Nm]
+ p_tot0 = inline_load(u0, du0, D, CD, CM, rho) # noqa: F405
+ vF0[it] = np.trapz(p_tot0, z) # [N] # noqa: NPY201
+ vM0[it] = np.trapz(p_tot0 * (z - z_ref), z) # [Nm] # noqa: NPY201
elevation[it, :] = z.copy()
velocity[it, :] = u.copy()
@@ -174,20 +174,20 @@
# axes[0] = axes[0]
axes[0].plot(time / T, veta, 'k-')
axes[0].set_ylabel('Elevation [m]')
-axes[0].grid(True)
+axes[0].grid(True) # noqa: FBT003
# axes[1] = axes[1]
axes[1].plot(time / T, vF0 / 1e6, label='Standard')
axes[1].plot(time / T, vF / 1e6, 'k-', label='Wheeler Correction')
axes[1].set_ylabel('Streamwise Load, Cumulative [MN]')
axes[1].legend()
-axes[1].grid(True)
+axes[1].grid(True) # noqa: FBT003
# axes[2] = axes[2]
axes[2].plot(time / T, vM0 / 1e6, label='Standard')
axes[2].plot(time / T, vM / 1e6, 'k-', label='Wheeler Correction')
axes[2].set_ylabel('Sea-Bed Moment [MNm]')
axes[2].set_xlabel('Dimensionless Time, t/T [-]')
axes[2].legend()
-axes[2].grid(True)
+axes[2].grid(True) # noqa: FBT003
# fig.savefig('IntegratedPileLoads.png')
# fig.savefig('IntegratedPileLoads.webp')
@@ -251,7 +251,7 @@
# results_df.to_csv('results.out', sep=' ', encoding='utf-8', header=False, index=False)
-def main(df=None):
+def main(df=None): # noqa: D103
return df
@@ -328,7 +328,7 @@ def main(df=None):
# plt.savefig('MorisonLoads.png')
# plt.show()
- print('End of __main__ in Ex4_WaveLoads.py')
+ print('End of __main__ in Ex4_WaveLoads.py') # noqa: T201
main()
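
Note: the NPY201 suppressions mark np.trapz calls: NumPy 2.0 removed np.trapz in favor of np.trapezoid, so the noqa keeps the 1.x spelling past the linter. A version-agnostic sketch of the same depth integration, with hypothetical values:

    import numpy as np

    # np.trapezoid exists on NumPy >= 2.0; fall back to np.trapz on 1.x
    trapezoid = getattr(np, 'trapezoid', None) or np.trapz

    z = np.linspace(-30.0, 0.0, 30)       # depth coordinate [m], hypothetical
    p_tot = 1.0e3 * np.ones_like(z)       # stand-in load distribution [N/m]
    F = trapezoid(p_tot, z)               # integrated inline force [N]
    M = trapezoid(p_tot * (z + 30.0), z)  # moment about the sea bed [Nm]
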
diff --git a/modules/createEVENT/stochasticWave/StochasticWave.py b/modules/createEVENT/stochasticWave/StochasticWave.py
index 6a422105f..045946a49 100644
--- a/modules/createEVENT/stochasticWave/StochasticWave.py
+++ b/modules/createEVENT/stochasticWave/StochasticWave.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001, D100
import argparse
import json
@@ -7,14 +7,14 @@
from welib.tools.figure import defaultRC
defaultRC()
-from welib.hydro.morison import *
-from welib.hydro.wavekin import *
+from welib.hydro.morison import * # noqa: E402, F403
+from welib.hydro.wavekin import * # noqa: E402, F403
-class FloorForces:
- def __init__(self, recorderID=-1):
+class FloorForces: # noqa: D101
+ def __init__(self, recorderID=-1): # noqa: N803
if recorderID < 0:
- print(
+ print( # noqa: T201
'No recorder ID, or a negative ID, provided, defaulting to 0 for all forces.'
)
self.X = [0.0]
@@ -25,7 +25,7 @@ def __init__(self, recorderID=-1):
self.Y = []
self.Z = []
# prepend zeros to the list to account for the timeSeries transient analysis req in OpenSees
- prependZero = False
+ prependZero = False # noqa: N806
if prependZero:
self.X.append(0.0)
self.Y.append(0.0)
@@ -34,15 +34,15 @@ def __init__(self, recorderID=-1):
# Read in forces.[out or evt] file and add to EVENT.json
# now using intermediary forces.evt for output of preceding Python calcs,
# prevents confusion with forces.out made by FEM tab
- with open('forces.evt') as file:
- print('Reading forces from forces.evt to EVENT.json')
+ with open('forces.evt') as file: # noqa: PTH123
+ print('Reading forces from forces.evt to EVENT.json') # noqa: T201
lines = file.readlines()
j = 0
for line in lines:
# Ensure not empty line
strip_line = line.strip()
if not strip_line:
- print('Empty line found in forces.evt... skip')
+ print('Empty line found in forces.evt... skip') # noqa: T201
continue
# Assume there is no header in the file
# Assume recorder IDs are sequential, starting from 1
@@ -63,7 +63,7 @@ def __init__(self, recorderID=-1):
# must not have empty lists for max and min
if len(self.X) == 0:
- print(
+ print( # noqa: T201
'No forces found in the file for recorder ',
recorderID,
', defaulting to 0.0 for all forces.',
@@ -77,7 +77,7 @@ def __init__(self, recorderID=-1):
self.Y.append(max(self.Y))
self.Z.append(max(self.Z))
- print(
+ print( # noqa: T201
'Length: ',
len(self.X),
', Max force: ',
@@ -96,21 +96,21 @@ def __init__(self, recorderID=-1):
file.close()
-def directionToDof(direction):
- """Converts direction to degree of freedom"""
- directioMap = {'X': 1, 'Y': 2, 'Z': 3}
+def directionToDof(direction): # noqa: N802
+ """Converts direction to degree of freedom""" # noqa: D400, D401
+ directioMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806
return directioMap[direction]
-def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor):
+def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor): # noqa: N802, N803
"""Add force (one component) time series and pattern in the event file
Use of Wind is just a placeholder for now, since it's more developed than Hydro
- """
- seriesName = '1'
- patternName = '1'
- seriesName = 'WindForceSeries_' + str(floor) + direction
- patternName = 'WindForcePattern_' + str(floor) + direction
+ """ # noqa: D205, D400
+ seriesName = '1' # noqa: N806
+ patternName = '1' # noqa: N806
+ seriesName = 'WindForceSeries_' + str(floor) + direction # noqa: N806
+ patternName = 'WindForcePattern_' + str(floor) + direction # noqa: N806
pattern = {
'name': patternName,
@@ -123,7 +123,7 @@ def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor):
'dof': directionToDof(direction),
'units': {'force': 'Newton', 'length': 'Meter', 'time': 'Sec'},
}
- sensorData = {
+ sensorData = { # noqa: N806
'name': seriesName,
'pattern': patternName,
'type': 'Value',
@@ -140,31 +140,31 @@ def addFloorForceToEvent(patternsList, timeSeriesList, force, direction, floor):
timeSeriesList.append(sensorData)
-def writeEVENT(forces, eventFilePath='EVENT.json', floorsCount=1):
- """This method writes the EVENT.json file"""
+def writeEVENT(forces, eventFilePath='EVENT.json', floorsCount=1): # noqa: N802, N803
+ """This method writes the EVENT.json file""" # noqa: D400, D401, D404
# Adding floor forces
- patternsArray = []
- timeSeriesArray = []
+ patternsArray = [] # noqa: N806
+ timeSeriesArray = [] # noqa: N806
# timeSeriesType = "Value" # ? saw in old evt files
# pressure = [{"pressure": [0.0, 0.0], "story": 1}]
pressure = []
for it in range(floorsCount):
- floorForces = forces[it]
+ floorForces = forces[it] # noqa: N806
addFloorForceToEvent(
patternsArray, timeSeriesArray, floorForces, 'X', it + 1
)
# subtype = "StochasticWindModel-KwonKareem2006"
- eventClassification = 'Hydro'
- eventType = 'StochasticWave'
- eventSubtype = 'StochasticWaveJonswap'
+ eventClassification = 'Hydro' # noqa: N806
+ eventType = 'StochasticWave' # noqa: N806
+ eventSubtype = 'StochasticWaveJonswap' # noqa: N806, F841
# subtype = "StochasticWaveJonswap" # ?
# timeSeriesName = "HydroForceSeries_1X"
# patternName = "HydroForcePattern_1X"
- hydroEventJson = {
+ hydroEventJson = { # noqa: N806
'type': eventClassification,
'subtype': eventType,
'eventClassification': eventClassification,
@@ -178,24 +178,24 @@ def writeEVENT(forces, eventFilePath='EVENT.json', floorsCount=1):
}
# Creating the event dictionary that will be used to export the EVENT json file
- eventDict = {'randomVariables': [], 'Events': [hydroEventJson]}
+ eventDict = {'randomVariables': [], 'Events': [hydroEventJson]} # noqa: N806
- filePath = eventFilePath
- with open(filePath, 'w', encoding='utf-8') as file:
+ filePath = eventFilePath # noqa: N806
+ with open(filePath, 'w', encoding='utf-8') as file: # noqa: PTH123
json.dump(eventDict, file)
file.close()
-def GetFloorsCount(BIMFilePath):
- filePath = BIMFilePath
- with open(filePath, encoding='utf-8') as file:
+def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103
+ filePath = BIMFilePath # noqa: N806
+ with open(filePath, encoding='utf-8') as file: # noqa: PTH123
bim = json.load(file)
- file.close
+ file.close # noqa: B018
return int(bim['GeneralInformation']['stories'])
-def main():
+def main(): # noqa: D103
return 0
# """
# Entry point to generate event file using Stochastic Waves
@@ -260,11 +260,11 @@ def main():
# import subprocess
# result = subprocess.run(["python", f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex4_WaveLoads.py", "-hw", 30.0, "-Tp", 12.7, "-Hs", 5.0, "-Dp", 1.0, "-Cd", 2.1, "-Cm", 2.1, "-nz", floorsCount, "-t", 10.0], stdout=subprocess.PIPE)
- if arguments.getRV == True:
- print('RVs requested in StochasticWave.py')
+ if arguments.getRV == True: # noqa: E712
+ print('RVs requested in StochasticWave.py') # noqa: T201
# Read the number of floors
- floorsCount = GetFloorsCount(arguments.filenameAIM)
- filenameEVENT = arguments.filenameEVENT
+ floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816
+ filenameEVENT = arguments.filenameEVENT # noqa: N816
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex1_WaveKinematics.py").read())
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex2_Jonswap_spectrum.py").read())
@@ -273,21 +273,21 @@ def main():
forces = []
for i in range(floorsCount):
- forces.append(FloorForces(recorderID=(i + 1)))
+ forces.append(FloorForces(recorderID=(i + 1))) # noqa: PERF401
# write the event file
writeEVENT(forces, filenameEVENT, floorsCount)
else:
- print('No RVs requested in StochasticWave.py')
- filenameEVENT = arguments.filenameEVENT
+ print('No RVs requested in StochasticWave.py') # noqa: T201
+ filenameEVENT = arguments.filenameEVENT # noqa: N816
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex1_WaveKinematics.py").read())
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex2_Jonswap_spectrum.py").read())
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex3_WaveTimeSeries.py").read())
# exec(open(f"{os.path.realpath(os.path.dirname(__file__))}"+"/Ex4_WaveLoads.py").read())
forces = []
- floorsCount = 1
+ floorsCount = 1 # noqa: N816
for i in range(floorsCount):
forces.append(FloorForces(recorderID=(i + 1)))
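
Note: the PERF401 suppression keeps the append loop, and the E712 suppression a few lines up keeps an explicit `== True` comparison; the forms Ruff suggests are equivalent. A runnable sketch with a stand-in class:

    class FloorForces:  # stand-in for the class above, only to make the sketch run
        def __init__(self, recorderID=-1):
            self.recorderID = recorderID

    floorsCount = 3
    # PERF401: list comprehension in place of the append loop
    forces = [FloorForces(recorderID=i + 1) for i in range(floorsCount)]

    # E712: `if getRV:` in place of `if getRV == True:`
    getRV = True
    if getRV:
        assert [f.recorderID for f in forces] == [1, 2, 3]
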
diff --git a/modules/createEVENT/uniformPEER/gridGroundMoion.py b/modules/createEVENT/uniformPEER/gridGroundMoion.py
index 0eb3cc26a..aba8ccb87 100644
--- a/modules/createEVENT/uniformPEER/gridGroundMoion.py
+++ b/modules/createEVENT/uniformPEER/gridGroundMoion.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2021 Leland Stanford Junior University
# Copyright (c) 2021 The Regents of the University of California
#
@@ -43,7 +43,7 @@
#
-# TODO recommended ranges???
+# TODO recommended ranges??? # noqa: TD002, TD004
# import matplotlib.pyplot as plt
@@ -59,19 +59,19 @@
from scipy.stats import gmean, qmc
-def main(inputArgs, err):
- gms = gmCluster(inputArgs, err)
+def main(inputArgs, err): # noqa: N803, D103
+ gms = gmCluster(inputArgs, err) # noqa: F841
-class gmCluster:
- def __init__(self, inputArgs, err):
+class gmCluster: # noqa: D101
+ def __init__(self, inputArgs, err): # noqa: ARG002, C901, N803, PLR0912, PLR0915
np.random.seed(seed=42)
- curDir = os.path.dirname(__file__)
- gmDataBaseDir = os.path.join(curDir, 'gmdata.json')
- inputJsonPath = inputArgs[1]
+ curDir = os.path.dirname(__file__) # noqa: PTH120, N806
+ gmDataBaseDir = os.path.join(curDir, 'gmdata.json') # noqa: PTH118, N806
+ inputJsonPath = inputArgs[1] # noqa: N806
- with open(inputJsonPath) as fj:
- inputJson = json.load(fj)
+ with open(inputJsonPath) as fj: # noqa: PTH123
+ inputJson = json.load(fj) # noqa: N806
nim = len(inputJson['IM'])
@@ -81,7 +81,7 @@ def __init__(self, inputArgs, err):
im_names = []
im_periods = []
i = 0
- for imName, value in inputJson['IM'].items():
+ for imName, value in inputJson['IM'].items(): # noqa: N806
im_names += [imName]
im_ub[i] = float(value['upperBound'])
im_lb[i] = float(value['lowerBound'])
@@ -94,25 +94,25 @@ def __init__(self, inputArgs, err):
+ imName
+ ' should be smaller than upperbound'
)
- print(msg)
- print(im_ub[i])
- print(im_lb[i])
+ print(msg) # noqa: T201
+ print(im_ub[i]) # noqa: T201
+ print(im_lb[i]) # noqa: T201
errf.write(msg)
errf.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
- i += 1
+ i += 1 # noqa: SIM113
npergrid = int(inputJson['numSampPerBin'])
- # TODO: Convert the units... Or fix the units......
+ # TODO: Convert the units... Or fix the units...... # noqa: TD002
# nim = len(im_names)
ngrid = np.prod(im_nbins)
#
# Clustering parameters
#
- numEQmax = int(
+ numEQmax = int( # noqa: N806
max(1, round(ngrid / 10))
) # Maximum number of records from a single earthquake
# numEQmax = 1
@@ -134,7 +134,7 @@ def __init__(self, inputArgs, err):
if im_names[ni].startswith('PSA') or im_names[ni].startswith('PGA'):
# scaling anchor
if not found_scaling_anchor:
- id_im_scaling_ancher = ni # TODO
+ id_im_scaling_ancher = ni # TODO # noqa: TD002, TD004
found_scaling_anchor = True
nim_eff = nim - 1
@@ -143,54 +143,54 @@ def __init__(self, inputArgs, err):
for ni in range(len(im_names)):
if im_names[ni].startswith('PG') or im_names[ni].startswith('Ia'):
if not found_scaling_anchor:
- id_im_scaling_ancher = ni # TODO
+ id_im_scaling_ancher = ni # TODO # noqa: TD002, TD004
found_scaling_anchor = True
nim_eff = nim - 1
if nim <= 0:
# ERROR
msg = 'number of IMs should be greater than 1'
- print(msg)
+ print(msg) # noqa: T201
errf.write(msg)
errf.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
elif nim_eff == 0:
# One variable we have is the scaling anchor
- myID = [1]
- Scaling_ref = np.linspace(log_im_lb[0], log_im_ub[0], int(im_nbins[0]))
- IM_log_ref = np.zeros(0) # dummy
- isGrid = True
+ myID = [1] # noqa: N806
+ Scaling_ref = np.linspace(log_im_lb[0], log_im_ub[0], int(im_nbins[0])) # noqa: N806
+ IM_log_ref = np.zeros(0) # dummy # noqa: N806
+ isGrid = True # noqa: N806
elif nim_eff == 1:
if found_scaling_anchor:
if found_scaling_anchor:
- myID = np.delete([0, 1], id_im_scaling_ancher)
- Scaling_ref = np.linspace(
+ myID = np.delete([0, 1], id_im_scaling_ancher) # noqa: N806
+ Scaling_ref = np.linspace( # noqa: N806
log_im_lb[id_im_scaling_ancher],
log_im_ub[id_im_scaling_ancher],
int(im_nbins[id_im_scaling_ancher]),
)
else:
- myID = [0]
- X = np.linspace(
+ myID = [0] # noqa: N806
+ X = np.linspace( # noqa: N806
log_im_lb[myID[0]], log_im_ub[myID[0]], int(im_nbins[myID[0]])
)
- IM_log_ref = X[np.newaxis].T
- isGrid = True
+ IM_log_ref = X[np.newaxis].T # noqa: N806
+ isGrid = True # noqa: N806
- elif nim_eff == 2:
+ elif nim_eff == 2: # noqa: PLR2004
if found_scaling_anchor:
- myID = np.delete([0, 1, 2], id_im_scaling_ancher)
- Scaling_ref = np.linspace(
+ myID = np.delete([0, 1, 2], id_im_scaling_ancher) # noqa: N806
+ Scaling_ref = np.linspace( # noqa: N806
log_im_lb[id_im_scaling_ancher],
log_im_ub[id_im_scaling_ancher],
int(im_nbins[id_im_scaling_ancher]),
)
else:
- myID = [0, 1]
+ myID = [0, 1] # noqa: N806
- X, Y = np.meshgrid(
+ X, Y = np.meshgrid( # noqa: N806
np.linspace(
log_im_lb[myID[0]], log_im_ub[myID[0]], int(im_nbins[myID[0]])
),
@@ -198,20 +198,20 @@ def __init__(self, inputArgs, err):
log_im_lb[myID[1]], log_im_ub[myID[1]], int(im_nbins[myID[1]])
),
)
- IM_log_ref = np.vstack([X.reshape(-1), Y.reshape(-1)]).T
- isGrid = True
- elif nim_eff == 3:
+ IM_log_ref = np.vstack([X.reshape(-1), Y.reshape(-1)]).T # noqa: N806
+ isGrid = True # noqa: N806
+ elif nim_eff == 3: # noqa: PLR2004
if found_scaling_anchor:
- myID = np.delete([0, 1, 2, 3], id_im_scaling_ancher)
- Scaling_ref = np.linspace(
+ myID = np.delete([0, 1, 2, 3], id_im_scaling_ancher) # noqa: N806
+ Scaling_ref = np.linspace( # noqa: N806
log_im_lb[id_im_scaling_ancher],
log_im_ub[id_im_scaling_ancher],
int(im_nbins[id_im_scaling_ancher]),
)
else:
- myID = [0, 1, 2]
+ myID = [0, 1, 2] # noqa: N806
- X, Y, Z = np.meshgrid(
+ X, Y, Z = np.meshgrid( # noqa: N806
np.linspace(
log_im_lb[myID[0]], log_im_ub[myID[0]], int(im_nbins[myID[0]])
),
@@ -222,49 +222,49 @@ def __init__(self, inputArgs, err):
log_im_lb[myID[2]], log_im_ub[myID[2]], int(im_nbins[myID[2]])
),
)
- IM_log_ref = np.vstack([X.reshape(-1), Y.reshape(-1), Z.reshape(-1)]).T
- isGrid = True
+ IM_log_ref = np.vstack([X.reshape(-1), Y.reshape(-1), Z.reshape(-1)]).T # noqa: N806
+ isGrid = True # noqa: N806
else:
if found_scaling_anchor:
- myID = np.delete(range(nim_eff + 1), id_im_scaling_ancher)
- Scaling_ref = np.linspace(
+ myID = np.delete(range(nim_eff + 1), id_im_scaling_ancher) # noqa: N806
+ Scaling_ref = np.linspace( # noqa: N806
log_im_lb[id_im_scaling_ancher],
log_im_ub[id_im_scaling_ancher],
int(im_nbins[id_im_scaling_ancher]),
)
else:
- myID = range(nim_eff)
+ myID = range(nim_eff) # noqa: N806
# Let us do LHS sampling
sampler = qmc.LatinHypercube(d=nim)
- U = sampler.random(n=ngrid)
- X = np.zeros((ngrid, nim_eff))
+ U = sampler.random(n=ngrid) # noqa: N806
+ X = np.zeros((ngrid, nim_eff)) # noqa: N806
for i in range(nim_eff):
X[:, i] = (
U[:, i] * (log_im_ub[myID[i]] - log_im_lb[myID[i]])
+ log_im_lb[myID[i]]
)
- IM_log_ref = X
- isGrid = False
+ IM_log_ref = X # noqa: N806
+ isGrid = False # noqa: N806
#
# Read Database
#
- with open(gmDataBaseDir) as fd:
- gmData = json.load(fd)
-
- RSN = gmData['RSN']
- geomPSA = gmData['geomPSA']
- geomPGA = gmData['geomPGA']
- geomPGV = gmData['geomPGV']
- geomPGD = gmData['geomPGD']
- geomDS575 = gmData['geomDS575']
- geomDS595 = gmData['geomDS595']
- geomIa = gmData['geomIa']
+ with open(gmDataBaseDir) as fd: # noqa: PTH123
+ gmData = json.load(fd) # noqa: N806
+
+ RSN = gmData['RSN'] # noqa: N806
+ geomPSA = gmData['geomPSA'] # noqa: N806
+ geomPGA = gmData['geomPGA'] # noqa: N806
+ geomPGV = gmData['geomPGV'] # noqa: N806
+ geomPGD = gmData['geomPGD'] # noqa: N806
+ geomDS575 = gmData['geomDS575'] # noqa: N806
+ geomDS595 = gmData['geomDS595'] # noqa: N806
+ geomIa = gmData['geomIa'] # noqa: N806
periods = gmData['period']
numgm = gmData['numgm']
- eqnameID = gmData['eqnameID']
+ eqnameID = gmData['eqnameID'] # noqa: N806
units = gmData['unit']
#
@@ -277,30 +277,30 @@ def __init__(self, inputArgs, err):
# Compute SaRatio(T_lowbound,T_cond,T_highbound) and Ds575
#
- IM_log_data_pool = np.zeros((numgm, 0))
+ IM_log_data_pool = np.zeros((numgm, 0)) # noqa: N806
scaling_exponent = np.zeros((nim,))
myunits = []
for ni in range(nim):
if im_names[ni].startswith('PSA'):
- Sa_T1 = np.zeros((numgm,))
- T_cond = float(im_periods[ni][0]) # central (<= 5.0)
+ Sa_T1 = np.zeros((numgm,)) # noqa: N806
+ T_cond = float(im_periods[ni][0]) # central (<= 5.0) # noqa: N806
for ng in range(numgm):
Sa_T1[ng] = np.interp(T_cond, periods, geomPSA[ng])
- Sa1_pool = Sa_T1[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(Sa1_pool)])
+ Sa1_pool = Sa_T1[np.newaxis].T # noqa: N806
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(Sa1_pool)]) # noqa: N806
scaling_exponent[ni] = 1
myunits += ['(' + units['PSA'] + ')']
elif im_names[ni] == 'SaRatio':
- Sa_T1 = np.zeros((numgm,))
- Sa_T_geomean = np.zeros((numgm,))
+ Sa_T1 = np.zeros((numgm,)) # noqa: N806
+ Sa_T_geomean = np.zeros((numgm,)) # noqa: N806
- T_lowbound = float(im_periods[ni][0]) # low-bound
- T_cond = float(im_periods[ni][1]) # central (<= 5.0)
- T_highbound = float(im_periods[ni][2]) # high-bound
+ T_lowbound = float(im_periods[ni][0]) # low-bound # noqa: N806
+ T_cond = float(im_periods[ni][1]) # central (<= 5.0) # noqa: N806
+ T_highbound = float(im_periods[ni][2]) # high-bound # noqa: N806
- idx_T_range = np.where(
+ idx_T_range = np.where( # noqa: N806
(np.array(periods) > T_lowbound)
* (np.array(periods) < T_highbound)
)[0]
@@ -311,8 +311,8 @@ def __init__(self, inputArgs, err):
np.array(geomPSA[ng])[idx_T_range.astype(int)]
)
- SaRatio_pool = (Sa_T1 / Sa_T_geomean)[np.newaxis].T
- IM_log_data_pool = np.hstack(
+ SaRatio_pool = (Sa_T1 / Sa_T_geomean)[np.newaxis].T # noqa: N806
+ IM_log_data_pool = np.hstack( # noqa: N806
[IM_log_data_pool, np.log(SaRatio_pool)]
)
scaling_exponent[ni] = 0
@@ -320,55 +320,55 @@ def __init__(self, inputArgs, err):
myunits += ['']
elif im_names[ni] == 'DS575':
ds_pool = (np.array(geomDS575))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ds_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ds_pool)]) # noqa: N806
scaling_exponent[ni] = 0
myunits += ['(' + units['DS575'] + ')']
elif im_names[ni] == 'DS595':
ds_pool = (np.array(geomDS595))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ds_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ds_pool)]) # noqa: N806
scaling_exponent[ni] = 0
myunits += ['(' + units['DS595'] + ')']
elif im_names[ni] == 'PGA':
pg_pool = (np.array(geomPGA))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)]) # noqa: N806
scaling_exponent[ni] = 1
myunits += ['(' + units['PGA'] + ')']
elif im_names[ni] == 'PGV':
pg_pool = (np.array(geomPGV))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)]) # noqa: N806
scaling_exponent[ni] = 1
myunits += ['(' + units['PGV'] + ')']
elif im_names[ni] == 'PGD':
pg_pool = (np.array(geomPGD))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(pg_pool)]) # noqa: N806
scaling_exponent[ni] = 1
myunits += ['(' + units['PGD'] + ')']
elif im_names[ni] == 'Ia':
ai_pool = (np.array(geomIa))[np.newaxis].T
- IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ai_pool)])
+ IM_log_data_pool = np.hstack([IM_log_data_pool, np.log(ai_pool)]) # noqa: N806
scaling_exponent[ni] = 2
myunits += ['(' + units['Ia'] + ')']
else:
msg = 'unrecognized IM name ' + im_names[ni]
- print(msg)
+ print(msg) # noqa: T201
errf.write(msg)
errf.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
if found_scaling_anchor:
- IM_log_data_scaling_anchor = IM_log_data_pool[:, id_im_scaling_ancher]
+ IM_log_data_scaling_anchor = IM_log_data_pool[:, id_im_scaling_ancher] # noqa: N806
# IM_log_ref_scaling_anchor = IM_log_ref[:,id_im_scaling_ancher]
- IM_log_ref_scaling_anchor = Scaling_ref
+ IM_log_ref_scaling_anchor = Scaling_ref # noqa: N806
- IM_log_data_pool2 = np.delete(
+ IM_log_data_pool2 = np.delete( # noqa: N806
IM_log_data_pool.copy(), id_im_scaling_ancher, 1
)
- IM_log_ref2 = IM_log_ref.copy()
+ IM_log_ref2 = IM_log_ref.copy() # noqa: N806
scaling_exponent = (
scaling_exponent / scaling_exponent[id_im_scaling_ancher]
@@ -377,24 +377,24 @@ def __init__(self, inputArgs, err):
scaling_exponent.copy(), id_im_scaling_ancher
)
log_im_range2 = np.delete(log_im_range.copy(), id_im_scaling_ancher)
- lenRef2 = np.mean(1 / np.delete(im_nbins.copy(), id_im_scaling_ancher))
+ lenRef2 = np.mean(1 / np.delete(im_nbins.copy(), id_im_scaling_ancher)) # noqa: N806
else:
- IM_log_data_pool2 = IM_log_data_pool
- IM_log_ref2 = IM_log_ref
+ IM_log_data_pool2 = IM_log_data_pool # noqa: N806
+ IM_log_ref2 = IM_log_ref # noqa: N806
scaling_exponent2 = scaling_exponent
log_im_range2 = log_im_range
- lenRef2 = np.linalg.norm(1 / im_nbins)
+ lenRef2 = np.linalg.norm(1 / im_nbins) # noqa: N806
if id_im_scaling_ancher >= 0:
if isGrid:
- nScalingGrid = im_nbins[id_im_scaling_ancher]
- nGridPerIM = ngrid / im_nbins[id_im_scaling_ancher]
+ nScalingGrid = im_nbins[id_im_scaling_ancher] # noqa: N806
+ nGridPerIM = ngrid / im_nbins[id_im_scaling_ancher] # noqa: N806
else:
- nScalingGrid = ngrid
- nGridPerIM = ngrid / im_nbins[id_im_scaling_ancher]
+ nScalingGrid = ngrid # noqa: N806
+ nGridPerIM = ngrid / im_nbins[id_im_scaling_ancher] # noqa: N806
else:
- nScalingGrid = 1
- nGridPerIM = ngrid
+ nScalingGrid = 1 # noqa: N806
+ nGridPerIM = ngrid # noqa: N806
sf_min = 0.5 # minimum no-penalty scaling factor
sf_max = 10.0 # maximum no-penalty scaling factor
@@ -404,9 +404,9 @@ def __init__(self, inputArgs, err):
# selected_gm_err_list =[]
# selected_gm_eqID_list =[]
# selected_gm_scale_list =[]
- selected_gm_ID = []
+ selected_gm_ID = [] # noqa: N806
selected_gm_err = []
- selected_gm_eqID = []
+ selected_gm_eqID = [] # noqa: N806
selected_gm_scale = []
err_sum = np.zeros((int(nScalingGrid), int(nGridPerIM)))
@@ -434,8 +434,8 @@ def __init__(self, inputArgs, err):
penalty_pool = np.zeros((numgm,))
else:
- SaT_ref = np.exp(IM_log_ref_scaling_anchor[nsa])
- Sa_T1 = np.exp(IM_log_data_scaling_anchor)
+ SaT_ref = np.exp(IM_log_ref_scaling_anchor[nsa]) # noqa: N806
+ Sa_T1 = np.exp(IM_log_data_scaling_anchor) # noqa: N806
# penalty for scaling factor
@@ -447,12 +447,12 @@ def __init__(self, inputArgs, err):
penalty_pool[temptag2] = (sf_max - sf_pool[temptag2]) ** 2
if IM_log_data_pool2.shape[1] > 0:
- IM_log_data_pool3 = (
+ IM_log_data_pool3 = ( # noqa: N806
IM_log_data_pool2
+ np.log(sf_pool[np.newaxis]).T * scaling_exponent2[np.newaxis]
)
- normData = IM_log_data_pool3 / log_im_range2
- normRefGrid = IM_log_ref2 / log_im_range2
+ normData = IM_log_data_pool3 / log_im_range2 # noqa: N806
+ normRefGrid = IM_log_ref2 / log_im_range2 # noqa: N806
err_mat = (
distance_matrix(normData, normRefGrid, p=2) ** 2 / lenRef2**2
+ np.tile(penalty_pool, (int(nGridPerIM), 1)).T * sf_penalty
@@ -469,8 +469,8 @@ def __init__(self, inputArgs, err):
count = 0
for ng in minerr_tag[:, ngr]:
- cureqID = eqnameID[ng]
- cureqID_existnum = np.sum(cureqID == np.array(selected_gm_eqID))
+ cureqID = eqnameID[ng] # noqa: N806
+ cureqID_existnum = np.sum(cureqID == np.array(selected_gm_eqID)) # noqa: N806
if (selected_gm_ID.count(ng) == 0) and (cureqID_existnum < numEQmax):
break # we only consider this
@@ -478,21 +478,21 @@ def __init__(self, inputArgs, err):
count += 1
if ng == minerr_tag[-1, ngr]:
msg = 'not enough ground motions to match your criteria'
- print(msg)
+ print(msg) # noqa: T201
errf.write(msg)
errf.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
- selected_gm_ID += [ng]
+ selected_gm_ID += [ng] # noqa: N806
selected_gm_err += [minerr[count, ngr]]
- selected_gm_eqID += [cureqID]
+ selected_gm_eqID += [cureqID] # noqa: N806
selected_gm_scale += [sf_pool[ng]]
err_sum[nsa, ngr] += err_pure[ng, ngr]
- flat_gm_ID = selected_gm_ID
+ flat_gm_ID = selected_gm_ID # noqa: N806
flat_gm_scale = selected_gm_scale
- flat_RSN = [RSN[myid] for myid in flat_gm_ID]
+ flat_RSN = [RSN[myid] for myid in flat_gm_ID] # noqa: N806, F841
#
# Write the results
@@ -502,7 +502,7 @@ def __init__(self, inputArgs, err):
my_results['gm_RSN'] = [int(RSN[int(flat_gm_ID[myid])]) for myid in idx]
my_results['gm_scale'] = [flat_gm_scale[myid] for myid in idx]
- with open('gridIM_output.json', 'w') as f:
+ with open('gridIM_output.json', 'w') as f: # noqa: PTH123
f.write(json.dumps(my_results))
#
@@ -522,12 +522,12 @@ def __init__(self, inputArgs, err):
# except:
# pass
- theLogIM = []
- LogIMref = []
+ theLogIM = [] # noqa: N806
+ LogIMref = [] # noqa: N806
for idx in range(nim):
- theLogSF = np.log(np.array(selected_gm_scale) ** scaling_exponent[idx])
- theLogIM += [np.array(IM_log_data_pool[selected_gm_ID, idx]) + theLogSF]
- LogIMref += [
+ theLogSF = np.log(np.array(selected_gm_scale) ** scaling_exponent[idx]) # noqa: N806
+ theLogIM += [np.array(IM_log_data_pool[selected_gm_ID, idx]) + theLogSF] # noqa: N806
+ LogIMref += [ # noqa: N806
np.linspace(log_im_lb[idx], log_im_ub[idx], int(im_nbins[idx]))
]
@@ -538,7 +538,7 @@ def __init__(self, inputArgs, err):
colorscale = [[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]
- if nim == 3:
+ if nim == 3: # noqa: PLR2004
flat_grid_error = err_sum.T.flatten() / npergrid
if found_scaling_anchor:
@@ -555,7 +555,7 @@ def __init__(self, inputArgs, err):
# reference points
#
- X, Y, Z = np.meshgrid(LogIMref[idx1], LogIMref[idx2], LogIMref[idx3])
+ X, Y, Z = np.meshgrid(LogIMref[idx1], LogIMref[idx2], LogIMref[idx3]) # noqa: N806
fig = px.scatter_3d(
x=np.exp(X.reshape(-1)),
@@ -569,9 +569,9 @@ def __init__(self, inputArgs, err):
)
fig.update_traces(
- marker=dict(
+ marker=dict( # noqa: C408
size=7,
- line=dict(width=2),
+ line=dict(width=2), # noqa: C408
)
)
@@ -590,17 +590,17 @@ def __init__(self, inputArgs, err):
y=np.exp(theLogIM[idx2]),
z=np.exp(theLogIM[idx3]),
mode='markers',
- marker=dict(
+ marker=dict( # noqa: C408
size=4,
- line=dict(width=1, color='black'),
+ line=dict(width=1, color='black'), # noqa: C408
color='orange',
),
name='selected ground motion',
)
fig.update_layout(
- scene=dict(
- xaxis=dict(
+ scene=dict( # noqa: C408
+ xaxis=dict( # noqa: C408
tickmode='array',
# tickvals=[im_lb[idx1],im_ub[idx1],0.001,0.01,0.1,1,10,100],),
tickvals=[
@@ -620,7 +620,7 @@ def __init__(self, inputArgs, err):
],
title=im_names[idx1] + myunits[idx1],
),
- yaxis=dict(
+ yaxis=dict( # noqa: C408
tickmode='array',
# tickvals=[im_lb[idx2],im_ub[idx2],0.001,0.01,0.1,1,10,100],),
tickvals=[
@@ -640,7 +640,7 @@ def __init__(self, inputArgs, err):
],
title=im_names[idx2] + myunits[idx2],
),
- zaxis=dict(
+ zaxis=dict( # noqa: C408
tickmode='array',
# tickvals=[im_lb[idx3],im_ub[idx3],0.001,0.01,0.1,1,10,100],),
tickvals=[
@@ -662,7 +662,7 @@ def __init__(self, inputArgs, err):
),
aspectmode='cube',
),
- legend=dict(
+ legend=dict( # noqa: C408
x=0,
y=0,
xanchor='left',
@@ -673,8 +673,8 @@ def __init__(self, inputArgs, err):
height=500,
width=550,
legend_orientation='h',
- scene_camera=dict(eye=dict(x=2, y=2, z=0.6)),
- margin=dict(l=20, r=20, t=20, b=20),
+ scene_camera=dict(eye=dict(x=2, y=2, z=0.6)), # noqa: C408
+ margin=dict(l=20, r=20, t=20, b=20), # noqa: C408
)
"""
@@ -715,7 +715,7 @@ def __init__(self, inputArgs, err):
ax.view_init(10, 30)
"""
- if nim == 2:
+ if nim == 2: # noqa: PLR2004
flat_grid_error = err_sum.flatten() / npergrid
idx1 = 0
@@ -725,15 +725,15 @@ def __init__(self, inputArgs, err):
# data points
#
- X, Y = np.meshgrid(LogIMref[idx1], LogIMref[idx2])
+ X, Y = np.meshgrid(LogIMref[idx1], LogIMref[idx2]) # noqa: N806
#
# interpolated area
#
- lowerboundX = np.min(np.log(im_lb[0]) - log_im_range[0] * 0.05)
- upperboundX = np.max(np.log(im_ub[0]) + log_im_range[0] * 0.05)
- lowerboundY = np.min(np.log(im_lb[1]) - log_im_range[1] * 0.05)
- upperboundY = np.max(np.log(im_ub[1]) + log_im_range[1] * 0.05)
+ lowerboundX = np.min(np.log(im_lb[0]) - log_im_range[0] * 0.05) # noqa: N806
+ upperboundX = np.max(np.log(im_ub[0]) + log_im_range[0] * 0.05) # noqa: N806
+ lowerboundY = np.min(np.log(im_lb[1]) - log_im_range[1] * 0.05) # noqa: N806
+ upperboundY = np.max(np.log(im_ub[1]) + log_im_range[1] * 0.05) # noqa: N806
xx = np.linspace(lowerboundX, upperboundX, 20)
yy = np.linspace(lowerboundY, upperboundY, 20)
@@ -756,9 +756,9 @@ def __init__(self, inputArgs, err):
color_continuous_scale=colorscale,
)
fig.update_traces(
- marker=dict(
+ marker=dict( # noqa: C408
size=15,
- line=dict(width=2, color='black'),
+ line=dict(width=2, color='black'), # noqa: C408
)
)
@@ -769,9 +769,9 @@ def __init__(self, inputArgs, err):
x=np.exp(theLogIM[idx1]),
y=np.exp(theLogIM[idx2]),
mode='markers',
- marker=dict(
+ marker=dict( # noqa: C408
size=5,
- line=dict(width=1, color='black'),
+ line=dict(width=1, color='black'), # noqa: C408
color='orange',
),
name='selected ground motion',
@@ -795,7 +795,7 @@ def __init__(self, inputArgs, err):
)
fig.update_layout(
- xaxis=dict(
+ xaxis=dict( # noqa: C408
tickmode='array',
# tickvals=[im_lb[idx1],im_ub[idx1],0.001,0.01,0.1,1,10,100],),
tickvals=[
@@ -815,7 +815,7 @@ def __init__(self, inputArgs, err):
],
title=im_names[idx1] + myunits[idx1],
),
- yaxis=dict(
+ yaxis=dict( # noqa: C408
tickmode='array',
# tickvals=[im_lb[idx2],im_ub[idx2],0.001,0.01,0.1,1,10,100],),
tickvals=[
@@ -835,7 +835,7 @@ def __init__(self, inputArgs, err):
],
title=im_names[idx2] + myunits[idx2],
),
- legend=dict(
+ legend=dict( # noqa: C408
x=0,
y=-0.1,
xanchor='left',
@@ -846,7 +846,7 @@ def __init__(self, inputArgs, err):
height=500,
width=550,
legend_orientation='h',
- margin=dict(l=20, r=20, t=20, b=20),
+ margin=dict(l=20, r=20, t=20, b=20), # noqa: C408
)
fig.update_coloraxes(
cmin=0,
@@ -948,14 +948,14 @@ def __init__(self, inputArgs, err):
# plt.xlabel(im_names[idx1]);
# plt.savefig('gridIM_coverage.png',bbox_inches='tight')
- if nim == 2 or nim == 3:
- with open(r'gridIM_coverage.html', 'w') as f:
+ if nim == 2 or nim == 3: # noqa: PLR1714, PLR2004
+ with open(r'gridIM_coverage.html', 'w') as f: # noqa: PTH123
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
f.close()
if __name__ == '__main__':
- errf = open('gridIM_log.err', 'w')
+ errf = open('gridIM_log.err', 'w') # noqa: SIM115, PTH123
main(sys.argv, errf)
# try:
# main(sys.argv,errf)
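
Note: two techniques in this file are worth unpacking. Records are grid-matched in normalized log-IM space, with amplitude scaling handled by per-IM exponents (1 for PSA/PGA/PGV, 2 for Ia, 0 for durations and SaRatio), so log(IM_scaled) = log(IM) + exponent * log(SF). And when the effective IM count exceeds three, the reference points come from Latin Hypercube sampling scaled into the log bounds; scipy's qmc.scale performs the same affine map as the manual U * (ub - lb) + lb above. A sketch with hypothetical bounds:

    import numpy as np
    from scipy.stats import qmc

    nim_eff, ngrid = 4, 100                           # hypothetical sizes
    log_im_lb = np.log([0.1, 0.2, 5.0, 0.5])          # hypothetical lower bounds
    log_im_ub = np.log([2.0, 1.5, 40.0, 3.0])         # hypothetical upper bounds

    sampler = qmc.LatinHypercube(d=nim_eff)
    U = sampler.random(n=ngrid)                       # LHS samples in [0, 1)^d
    IM_log_ref = qmc.scale(U, log_im_lb, log_im_ub)   # same as U * (ub - lb) + lb
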
diff --git a/modules/createSAM/AutoSDA/beam_component.py b/modules/createSAM/AutoSDA/beam_component.py
index b731943c9..ae245e5d5 100644
--- a/modules/createSAM/AutoSDA/beam_component.py
+++ b/modules/createSAM/AutoSDA/beam_component.py
@@ -1,4 +1,4 @@
-# This file is used to define the class of beam, which includes the axial, shear, and flexural strengths of column
+# This file is used to define the class of beam, which includes the axial, shear, and flexural strengths of the beam # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in Apr. 2018
# Updated in Oct. 2018
@@ -17,7 +17,7 @@ class Beam:
(2) Beam demand, a dictionary including shear and flexural demands.
(3) Beam strength, a dictionary including shear and flexural strengths.
(4) Beam flag, a boolean variable with True or False. If it is True, the beam is feasible.
- """
+ """ # noqa: D205, D404
def __init__(
self,
@@ -34,7 +34,7 @@ def __init__(
:param shear_demand: a float number denoting the shear demand.
:param moment_demand_left: a float number denoting the moment demand at the left end.
:param moment_demand_right: a float number denoting the moment demand at the right end.
- """
+ """ # noqa: D205, D401, D404
# Assign the necessary information for column class
self.section = search_section_property(section_size, SECTION_DATABASE)
self.demand = {
@@ -71,7 +71,7 @@ def __init__(
def initialize_reduced_beam_section(self):
"""This method is used to initialize RBS dimensions.
:return: a dictionary including a, b, and c values describing RBS dimensions.
- """
+ """ # noqa: D205, D401, D404
# Use the lower bound as the initial value for a and b
self.RBS_dimension['a'] = 0.5 * self.section['bf']
self.RBS_dimension['b'] = 0.65 * self.section['d']
@@ -82,12 +82,12 @@ def check_flange(self, steel):
"""This method is used to check whether the flange is satisfied with highly ductile requirement.
: steel: a class defined in "steel_material.py" file
: return: a flag (integer) which denotes the flange check result.
- """
+ """ # noqa: D205, D401, D404
# Calculate equivalent flange width at reduced beam section
- R = (4 * self.RBS_dimension['c'] ** 2 + self.RBS_dimension['b'] ** 2) / (
+ R = (4 * self.RBS_dimension['c'] ** 2 + self.RBS_dimension['b'] ** 2) / ( # noqa: N806
8 * self.RBS_dimension['c']
)
- bf_RBS = (
+ bf_RBS = ( # noqa: N806
2 * (R - self.RBS_dimension['c'])
+ self.section['bf']
- 2 * np.sqrt(R**2 - (self.RBS_dimension['b'] / 3) ** 2)
@@ -106,7 +106,7 @@ def check_web(self, steel):
"""This method is used to check whether the web is satisfied with highly ductile requirement.
:param steel: a class defined in "steel_material.py" file.
:return: a flag (integer) which denotes the web check result.
- """
+ """ # noqa: D205, D401, D404
# Compute limit for web depth-to-width ratio
web_limit = 2.45 * np.sqrt(steel.E / steel.Fy)
# Check whether web is satisfied with the requirement or not
@@ -119,7 +119,7 @@ def determine_spacing_between_lateral_support(self, steel):
"""This method is used to compute the spacing between two lateral supports.
:param steel: a class defined in "steel_material.py" file.
:return: a float number indicating the spacing.
- """
+ """ # noqa: D205, D401, D404
# Compute limit for spacing (Remember to convert from inches to feet)
spacing_limit = 0.086 * self.section['ry'] * steel.E / steel.Fy * 1 / 12.0
# Start with the number of lateral support equal to 1
@@ -131,7 +131,7 @@ def determine_spacing_between_lateral_support(self, steel):
# Check whether the spacing is less than Lp
# If the spacing is greater than Lp, then reduce the spacing such that the flexural strength is governed by
# plastic yielding.
- Lp = 1.76 * self.section['ry'] * np.sqrt(steel.E / steel.Fy)
+ Lp = 1.76 * self.section['ry'] * np.sqrt(steel.E / steel.Fy) # noqa: N806
while (self.length / number_lateral_support + 1) > Lp:
number_lateral_support += 1
self.spacing = self.length / (number_lateral_support + 1)
@@ -140,10 +140,10 @@ def check_shear_strength(self, steel):
"""This method is used to check whether the shear strength of column is sufficient or not
:param steel: a class defined in "steel_material.py" file
:return: a float number denoting the shear strength and a flag denoting whether shear strength is sufficient
- """
+ """ # noqa: D205, D400, D401, D404
# Compute shear strength of beam
- Cv = 1.0
- Vn = 0.6 * steel.Fy * (self.section['tw'] * self.section['d']) * Cv
+ Cv = 1.0 # noqa: N806
+ Vn = 0.6 * steel.Fy * (self.section['tw'] * self.section['d']) * Cv # noqa: N806
phi = 1.0
self.strength['shear'] = phi * Vn
# Check whether shear strength is sufficient
@@ -155,17 +155,17 @@ def check_shear_strength(self, steel):
def check_flexural_strength(self, steel):
"""This method is used to check whether the beam has enough flexural strength.
:return: a float number denoting flexural strength and a flag denoting whether the flexural strength is enough
- """
+ """ # noqa: D205, D400, D401, D404
# Compute plastic modulus at center of RBS
- Z_RBS = self.section['Zx'] - 2 * self.RBS_dimension['c'] * self.section[
+ Z_RBS = self.section['Zx'] - 2 * self.RBS_dimension['c'] * self.section[ # noqa: N806
'tf'
] * (self.section['d'] - self.section['tf'])
# Calculate the moment capacity governed by plastic yielding at RBS
- Mn_RBS = steel.Fy * Z_RBS
+ Mn_RBS = steel.Fy * Z_RBS # noqa: N806
phi = 0.9
self.strength['flexural RBS'] = phi * Mn_RBS
# Check whether the flexural strength is sufficient
- M_max = np.max(
+ M_max = np.max( # noqa: N806
[abs(self.demand['moment right']), abs(self.demand['moment left'])]
)
if self.strength['flexural RBS'] >= M_max:
@@ -176,17 +176,17 @@ def check_flexural_strength(self, steel):
def check_flag(self):
"""This method is used to test whether beam passes all checks.
:return: a bool variable. True ==> passed
- """
+ """ # noqa: D205, D400, D401, D404
self.flag = True
- for key in self.is_feasible.keys():
- if self.is_feasible[key] == False:
+ for key in self.is_feasible.keys(): # noqa: SIM118
+ if self.is_feasible[key] == False: # noqa: E712
self.flag = False
return self.flag
def compute_demand_capacity_ratio(self):
"""This method is used to compute demand to capacity ratios.
:return: a dictionary which includes the ratios for shear force and flexural moment.
- """
+ """ # noqa: D205, D401, D404
self.demand_capacity_ratio['shear'] = (
self.demand['shear'] / self.strength['shear']
)
@@ -198,7 +198,7 @@ def compute_demand_capacity_ratio(self):
def calculate_hinge_parameters(self, steel):
"""This method is used to compute the modeling parameters for plastic hinge using modified IMK material model.
:return: a dictionary including each parameters required for nonlinear modeling in OpenSees.
- """
+ """ # noqa: D205, D401, D404
# Following content is based on the following reference:
# [1] Hysteretic models that incorporate strength and stiffness deterioration
# [2] Deterioration modeling of steel components in support of collapse prediction of steel moment frames under
@@ -219,7 +219,7 @@ def calculate_hinge_parameters(self, steel):
# beam spacing and length is in feet, remember to convert it to inches
c1 = 25.4 # c1_unit
c2 = 6.895 # c2_unit
- McMy = 1.10 # Capping moment to yielding moment ratio. Lignos et al. used 1.05 whereas Prof. Burton used 1.11.
+ McMy = 1.10 # Capping moment to yielding moment ratio. Lignos et al. used 1.05 whereas Prof. Burton used 1.11. # noqa: N806
h = self.section['d'] - 2 * self.section['tf'] # Web depth
self.plastic_hinge['K0'] = (
6 * steel.E * self.section['Ix'] / (self.length * 12.0)
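
Note: the RBS check above reduces the plastic modulus by the material removed at the flange cuts, Z_RBS = Zx - 2*c*tf*(d - tf), then compares phi * Fy * Z_RBS against the peak end moment. A worked sketch with hypothetical W-section numbers (kip-inch units assumed):

    # Hypothetical section properties, kip-inch units
    Fy = 50.0           # yield stress [ksi]
    Zx = 224.0          # plastic modulus of the gross section [in^3]
    d, tf = 24.1, 0.77  # depth and flange thickness [in]
    c = 2.0             # RBS flange cut depth [in]

    Z_RBS = Zx - 2 * c * tf * (d - tf)  # plastic modulus at the reduced section
    Mn_RBS = Fy * Z_RBS                 # nominal flexural strength at the RBS [kip-in]
    phi_Mn = 0.9 * Mn_RBS               # design strength with phi = 0.9, as above
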
diff --git a/modules/createSAM/AutoSDA/building_information.py b/modules/createSAM/AutoSDA/building_information.py
index 499106652..3a0133a69 100644
--- a/modules/createSAM/AutoSDA/building_information.py
+++ b/modules/createSAM/AutoSDA/building_information.py
@@ -1,4 +1,4 @@
-# This file is used to define the class of Building
+# This file is used to define the class of Building # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in June 2018
# Updated in Sept. 2018
@@ -67,13 +67,13 @@ class Building:
(5) Compute lateral force for the building based on ASCE 7-10
(6) Determine possible section sizes for columns and beams based on user-specified section depth
(7) Propose initial beam and column sizes
- """
+ """ # noqa: D205, D400, D404
- def __init__(self, base_directory, pathDataFolder, workingDirectory):
+ def __init__(self, base_directory, pathDataFolder, workingDirectory): # noqa: N803
"""This function initializes the attributes of a building instance
:param building_id: a string that used as a UID to label the building
:param base_directory: a string that denotes the path to root folder
- """
+ """ # noqa: D205, D400, D401, D404
# Assign basic information: unique ID for the building and base path
self.base_directory = base_directory
self.dataFolderDirectory = pathDataFolder
@@ -102,7 +102,7 @@ def __init__(self, base_directory, pathDataFolder, workingDirectory):
self.initialize_member()
# self.initialize_member_v2()
- def define_directory(self):
+ def define_directory(self): # noqa: D102
# Define all useful paths based on the path to root folder
# Define path to folder where the baseline .tcl files for elastic analysis are saved
baseline_elastic_directory = (
@@ -141,23 +141,23 @@ def read_geometry(self):
"""This method is used to read the building geometry information from .csv files:
(1) Change the working directory to the folder where .csv data are stored
(2) Open the .csv file and save all relevant information to the object itself
- """
+ """ # noqa: D205, D400, D401, D404
os.chdir(self.directory['building data'])
- with open('Geometry.csv') as csvfile:
+ with open('Geometry.csv') as csvfile: # noqa: PTH123
geometry_data = pd.read_csv(csvfile, header=0)
# Each variable is a scalar
number_of_story = geometry_data.loc[0, 'number of story']
- number_of_X_bay = geometry_data.loc[0, 'number of X bay']
- number_of_Z_bay = geometry_data.loc[0, 'number of Z bay']
+ number_of_X_bay = geometry_data.loc[0, 'number of X bay'] # noqa: N806
+ number_of_Z_bay = geometry_data.loc[0, 'number of Z bay'] # noqa: N806
first_story_height = geometry_data.loc[0, 'first story height']
typical_story_height = geometry_data.loc[0, 'typical story height']
- X_bay_width = geometry_data.loc[0, 'X bay width']
- Z_bay_width = geometry_data.loc[0, 'Z bay width']
- number_of_X_LFRS = geometry_data.loc[
+ X_bay_width = geometry_data.loc[0, 'X bay width'] # noqa: N806
+ Z_bay_width = geometry_data.loc[0, 'Z bay width'] # noqa: N806
+ number_of_X_LFRS = geometry_data.loc[ # noqa: N806
0, 'number of X LFRS'
] # number of lateral resisting frame in X direction
- number_of_Z_LFRS = geometry_data.loc[
+ number_of_Z_LFRS = geometry_data.loc[ # noqa: N806
0, 'number of Z LFRS'
] # number of lateral resisting frame in Z direction
# Call function defined in "help_functions.py" to determine the height for each floor level
@@ -182,9 +182,9 @@ def read_gravity_loads(self):
"""This method is used to read the load information from .csv files
(1) Change the directory to the folder where the load data are stored
(2) Read the .csv files and assign save load values to object values
- """
+ """ # noqa: D205, D400, D401, D404
os.chdir(self.directory['building data'])
- with open('Loads.csv') as csvfile:
+ with open('Loads.csv') as csvfile: # noqa: PTH123
loads_data = pd.read_csv(csvfile, header=0)
# for i in loads_data._iter_column_arrays():
@@ -195,12 +195,12 @@ def read_gravity_loads(self):
val = i[j]
try:
float(val)
- except:
+ except: # noqa: E722
rv = RV_ARRAY.get(val, 0)
if rv != 0:
i[j] = rv
else:
- print('Error getting an RV with the key', val)
+ print('Error getting an RV with the key', val) # noqa: T201
return
# All data is a list (array). Length is the number of story
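
The bare `except` above treats any non-numeric cell as a random-variable key and falls back to `RV_ARRAY`. A more explicit spelling of that lookup (a sketch; `resolve_load_cell` is a hypothetical helper, and the mapping argument plays the role of the module-level `RV_ARRAY` the loop already assumes):

```python
def resolve_load_cell(val, rv_map):
    """Return val as a float, or its mapped value if val is an RV key."""
    try:
        return float(val)
    except (TypeError, ValueError):
        rv = rv_map.get(val, 0)
        if rv == 0:
            # Raise instead of printing and returning, as the loop above does
            raise KeyError(f'Error getting an RV with the key {val!r}')
        return rv

# resolve_load_cell('80', {}) -> 80.0
# resolve_load_cell('DL1', {'DL1': 82.5}) -> 82.5
```
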
@@ -217,7 +217,7 @@ def read_gravity_loads(self):
float
)
- print(floor_weight)
+ print(floor_weight) # noqa: T201
# Store all necessary information into a dictionary named gravity_loads
self.gravity_loads = {
@@ -234,30 +234,30 @@ def read_elf_parameters(self):
"""This method is used to read equivalent lateral force (in short: elf) parameters and calculate SDS and SD1
(1) Read equivalent lateral force parameters
(2) Calculate SMS, SM1, SDS, SD1 values and save them into the attribute
- """
+ """ # noqa: D205, D400, D401, D404
os.chdir(self.directory['building data'])
- with open('ELFParameters.csv') as csvfile:
+ with open('ELFParameters.csv') as csvfile: # noqa: PTH123
elf_parameters_data = pd.read_csv(csvfile, header=0)
# Determine Fa and Fv coefficient based on site class and Ss and S1 (ASCE 7-10 Table 11.4-1 & 11.4-2)
# Call function defined in "help_functions.py" to calculate two coefficients: Fa and Fv
- Fa = determine_Fa_coefficient(
+ Fa = determine_Fa_coefficient( # noqa: N806
elf_parameters_data.loc[0, 'site class'],
elf_parameters_data.loc[0, 'Ss'],
)
- Fv = determine_Fv_coefficient(
+ Fv = determine_Fv_coefficient( # noqa: N806
elf_parameters_data.loc[0, 'site class'],
elf_parameters_data.loc[0, 'S1'],
)
# Determine SMS, SM1, SDS, SD1 using the defined function in "help_functions.py"
- SMS, SM1, SDS, SD1 = calculate_DBE_acceleration(
+ SMS, SM1, SDS, SD1 = calculate_DBE_acceleration( # noqa: N806
elf_parameters_data.loc[0, 'Ss'],
elf_parameters_data.loc[0, 'S1'],
Fa,
Fv,
)
# Determine Cu using the defined function in "help_functions.py"
- Cu = determine_Cu_coefficient(SD1)
+ Cu = determine_Cu_coefficient(SD1) # noqa: N806
# Calculate the building period: approximate fundamental period and upper bound period
approximate_period = elf_parameters_data.loc[0, 'Ct'] * (
@@ -294,7 +294,7 @@ def compute_seismic_force(self):
(2) Determine the correct period between first mode period and CuTa
(3) Determine the Cs coefficient
(4) Determine the lateral force at each floor level (ground to roof) and save it in an array
- """
+ """ # noqa: D205, D400, D401, D404
# Please note that the period for computing the required strength should be bounded by CuTa
period_for_strength = min(
self.elf_parameters['modal period'], self.elf_parameters['period']
@@ -307,7 +307,7 @@ def compute_seismic_force(self):
else:
period_for_drift = self.elf_parameters['modal period']
# Call function defined in "help_functions.py" to determine the seismic response coefficient
- Cs_for_strength = calculate_Cs_coefficient(
+ Cs_for_strength = calculate_Cs_coefficient( # noqa: N806
self.elf_parameters['SDS'],
self.elf_parameters['SD1'],
self.elf_parameters['S1'],
@@ -316,7 +316,7 @@ def compute_seismic_force(self):
self.elf_parameters['R'],
self.elf_parameters['Ie'],
)
- Cs_for_drift = calculate_Cs_coefficient(
+ Cs_for_drift = calculate_Cs_coefficient( # noqa: N806
self.elf_parameters['SDS'],
self.elf_parameters['SD1'],
self.elf_parameters['S1'],
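
`calculate_Cs_coefficient` lives in help_functions.py and is not shown in this diff; presumably it encodes the ASCE 7-10 Section 12.8.1.1 caps and floors on the seismic response coefficient, and the base shear V = Cs*W is then distributed over height per Section 12.8.3. A standalone sketch of those equations, for orientation only (not the project's helper):

```python
import numpy as np

def cs_coefficient(SDS, SD1, S1, T, TL, R, Ie):
    """ASCE 7-10 Eq. 12.8-2 with the 12.8-3/4 caps and 12.8-5/6 floors."""
    Cs = SDS / (R / Ie)                               # Eq. 12.8-2
    if T <= TL:
        Cs = min(Cs, SD1 / (T * (R / Ie)))            # Eq. 12.8-3
    else:
        Cs = min(Cs, SD1 * TL / (T**2 * (R / Ie)))    # Eq. 12.8-4
    Cs = max(Cs, 0.044 * SDS * Ie, 0.01)              # Eq. 12.8-5
    if S1 >= 0.6:
        Cs = max(Cs, 0.5 * S1 / (R / Ie))             # Eq. 12.8-6
    return Cs

def story_forces(V, w, h, T):
    """Vertical distribution Fx = Cvx * V per Eq. 12.8-11/12.8-12."""
    k = 1.0 if T <= 0.5 else 2.0 if T >= 2.5 else 1.0 + (T - 0.5) / 2.0
    Cvx = w * h**k / np.sum(w * h**k)
    return Cvx * V

# cs_coefficient(1.0, 0.6, 0.6, T=1.0, TL=8.0, R=8.0, Ie=1.0) -> 0.075
```
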
@@ -368,10 +368,10 @@ def compute_seismic_force(self):
def determine_member_candidate(self):
"""This method is used to determine all possible member candidates based on the user-specified section depth
:return: a dictionary which contains all possible sizes for exterior columns, interior columns, and beams.
- """
+ """ # noqa: D205, D401, D404
# Read the user-specified depths for interior columns, exterior columns, and beams.
os.chdir(self.directory['building data'])
- with open('MemberDepth.csv') as csvfile:
+ with open('MemberDepth.csv') as csvfile: # noqa: PTH123
depth_data = pd.read_csv(csvfile, header=0)
# Initialize dictionary that will be used to store all possible section sizes for each member (in each story)
interior_column_candidate = {}
@@ -440,7 +440,7 @@ def determine_member_candidate(self):
def initialize_member(self):
"""This method is used to initialize the member size
:return: a dictionary which includes the initial size for interior columns, exterior columns, and beams
- """
+ """ # noqa: D205, D400, D401, D404
# Define initial sizes for columns and beams
interior_column = []
exterior_column = []
@@ -480,7 +480,7 @@ def read_modal_period(self):
"""This method is used to read the modal period from OpenSees eigen value analysis results and store it in ELF
parameters.
:return: the first mode period stored in self.elf_parameters
- """
+ """ # noqa: D205, D400, D401, D404
# Change the working directory to the folder where the eigen value analysis results are stored
path_modal_period = (
self.directory['building elastic model'] + '/EigenAnalysis'
@@ -499,7 +499,7 @@ def read_story_drift(self):
"""This method is used to read the story drifts from OpenSees elastic analysis results and stored it as attribute
The load case for story drift is the combination of dead, live, and earthquake loads.
:return: an [story*1] array which includes the story drifts for each story.
- """
+ """ # noqa: D205, D401, D404
# Change the working directory to the folder where story drifts are stored
path_story_drift = (
self.directory['building elastic model']
@@ -518,7 +518,7 @@ def read_story_drift(self):
def optimize_member_for_drift(self):
"""This method is used to decrease the member size such that the design is most economic.
:return: update self.member_size
- """
+ """ # noqa: D205, D400, D401, D404
# Find the story which has the smallest drift
target_story = np.where(
self.elastic_response['story drift']
@@ -561,7 +561,7 @@ def upscale_column(self, target_story, type_column):
:param target_story: a scalar to denote which story column shall be increased (from 0 to total story # - 1).
:param type_column: a string denoting whether it is an exterior column or interior column
:return: update the column size stored in self.member_size
- """
+ """ # noqa: D205, D400, D401, D404
temp_size = increase_member_size(
self.element_candidate[type_column]['story %s' % (target_story + 1)],
self.member_size[type_column][target_story],
@@ -575,7 +575,7 @@ def upscale_beam(self, target_floor):
"""This method is used to increase beam size which might be necessary when beam strength is not sufficient
:param target_floor: a scalar to denote which floor beam shall be improved. (from 0 to total story # - 1)
:return: update the beam size stored in self.member_size
- """
+ """ # noqa: D205, D400, D401, D404
temp_size = increase_member_size(
self.element_candidate['beam']['floor level %s' % (target_floor + 2)],
self.member_size['beam'][target_floor],
@@ -612,7 +612,7 @@ def upscale_beam(self, target_floor):
def constructability_beam(self):
"""This method is used to update the beam member size by considering the constructability (ease of construction).
:return: update the beam sizes stored in self.member_size['beam']
- """
+ """ # noqa: D205, D400, D401, D404
# Make a deep copy of the member sizes and store them in a new dictionary named construction_size
# Use deep copy to avoid changing the variables stored in member size
temp_size = copy.deepcopy(self.member_size)
@@ -630,7 +630,7 @@ def constructability_beam(self):
def constructability_column(self):
"""This method is used to update the column member size by considering the constructability (ease of construction).
:return: update the column sizes stored in self.member_size
- """
+ """ # noqa: D205, D400, D401, D404
# Make a copy of the member size
temp_size = copy.deepcopy(self.member_size)
# Update column sizes based on the sorted Ix
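
The constructability pass itself is not shown in this hunk; judging from the comments, after sorting candidates by Ix the column sizes are regularized so that fabricated sizes repeat across adjacent stories. A toy sketch of one such rule, that sections never get stiffer going up the height (assumed behavior, not the project's algorithm):

```python
def enforce_nonincreasing(sizes, ix_of):
    """Force section Ix to be non-increasing from bottom story to top.

    sizes: list of section names, index 0 = first story.
    ix_of: mapping from section name to its moment of inertia Ix (in^4).
    """
    out = list(sizes)
    for story in range(1, len(out)):
        if ix_of[out[story]] > ix_of[out[story - 1]]:
            out[story] = out[story - 1]   # carry the lower-story size up
    return out

# enforce_nonincreasing(['W24X76', 'W24X84', 'W24X76'],
#                       {'W24X76': 2100, 'W24X84': 2370})
# -> ['W24X76', 'W24X76', 'W24X76']
```
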
diff --git a/modules/createSAM/AutoSDA/column_component.py b/modules/createSAM/AutoSDA/column_component.py
index a0d97b6ae..e9d56768f 100644
--- a/modules/createSAM/AutoSDA/column_component.py
+++ b/modules/createSAM/AutoSDA/column_component.py
@@ -1,4 +1,4 @@
-# This file is used to define the class of column, which includes the axial, shear, and flexural strengths of column
+# This file is used to define the class of column, which includes the axial, shear, and flexural strengths of column # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in Apr. 2018
# Updated in Oct. 2018
@@ -18,7 +18,7 @@ class Column:
(2) Column demand, a dictionary including axial, shear, and flexural demands.
(3) Column strength, a dictionary including axial, shear, and flexural strengths.
(4) Column flag, an integer with value of zero or nonzero. If it's zero, the column is feasible.
- """
+ """ # noqa: D205, D404
def __init__(
self,
@@ -27,8 +27,8 @@ def __init__(
shear_demand,
moment_demand_bot,
moment_demand_top,
- Lx,
- Ly,
+ Lx, # noqa: N803
+ Ly, # noqa: N803
steel,
):
"""This function initializes the attributes of class of column.
@@ -39,7 +39,7 @@ def __init__(
:param moment_demand_top: a float number which describes moment demand at top of column.
:param Lx: unbraced length in x direction.
:param Ly: unbraced length in y direction.
- """
+ """ # noqa: D205, D401, D404
# Assign the necessary information for column class
self.section = search_section_property(section_size, SECTION_DATABASE)
self.demand = {
@@ -77,7 +77,7 @@ def check_flange(self, steel):
Seismic Design Manual Table D1.1.
:param steel: a class defined in "steel_material.py" file.
:return: a boolean variable which denotes the flange check results.
- """
+ """ # noqa: D205, D401, D404
flange_limit = 0.30 * np.sqrt(steel.E / steel.Fy)
# If the flag is still zero after checking the limit, then the highly ductile requirement is met.
# Otherwise, it is not satisfied.
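
Numerically, with E = 29000 ksi and Fy = 50 ksi, the highly ductile flange limit above is 0.30*sqrt(E/Fy), roughly 7.22, so the check reduces to comparing the section's bf/2tf against that value. A self-contained sketch (the slenderness values are illustrative, not project sections):

```python
import math

def flange_is_highly_ductile(bf_2tf, E=29000.0, Fy=50.0):
    """AISC 341 highly ductile flange slenderness check (Table D1.1)."""
    return bf_2tf <= 0.30 * math.sqrt(E / Fy)

print(flange_is_highly_ductile(6.5))   # True:  6.5 <= 7.22
print(flange_is_highly_ductile(7.8))   # False: 7.8 >  7.22
```
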
@@ -91,11 +91,11 @@ def check_web(self, steel):
Seismic Design Manual Table D1.1.
:param steel: a class defined in "steel_material.py" file.
:return: a boolean variable which denotes the web check result.
- """
+ """ # noqa: D205, D401, D404
# Compute the limit for web depth-to-thickness ratio
phi = 0.9
- Ca = self.demand['axial'] / (phi * steel.Fy * self.section['A'])
- if Ca <= 0.125:
+ Ca = self.demand['axial'] / (phi * steel.Fy * self.section['A']) # noqa: N806
+ if Ca <= 0.125: # noqa: PLR2004
web_limit = 2.45 * np.sqrt(steel.E / steel.Fy) * (1 - 0.93 * Ca)
else:
web_limit = np.max(
@@ -115,10 +115,10 @@ def check_axial_strength(self, steel):
:param steel: a class defined in "steel_material.py" file.
:return: a float number denoting the axial strength
and a boolean variable denoting whether the column strength is enough.
- """
+ """ # noqa: D205, D401, D404
# Default values for the two effective length factors
- Kx = 1.0
- Ky = 1.0
+ Kx = 1.0 # noqa: N806
+ Ky = 1.0 # noqa: N806
slenderness_ratio = max(
[
Kx * self.unbraced_length['x'] / self.section['rx'],
@@ -126,14 +126,14 @@ def check_axial_strength(self, steel):
]
)
# Compute elastic buckling stress
- Fe = np.pi**2 * steel.E / (slenderness_ratio**2)
+ Fe = np.pi**2 * steel.E / (slenderness_ratio**2) # noqa: N806
# Calculate critical stress
if slenderness_ratio <= (4.71 * np.sqrt(steel.E / steel.Fy)):
- Fcr = 0.658 ** (steel.Fy / Fe) * steel.Fy
+ Fcr = 0.658 ** (steel.Fy / Fe) * steel.Fy # noqa: N806
else:
- Fcr = 0.877 * Fe
+ Fcr = 0.877 * Fe # noqa: N806
# Compute nominal compressive strength
- Pn = Fcr * self.section['A']
+ Pn = Fcr * self.section['A'] # noqa: N806
# Store axial strength into "strength" dictionary
phi = 0.9
self.strength['axial'] = phi * Pn
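
This is AISC 360 Chapter E flexural buckling: Fe = pi^2*E/(KL/r)^2, with Fcr = 0.658^(Fy/Fe)*Fy in the inelastic range and 0.877*Fe in the elastic range. A standalone numeric sketch (the slenderness and area are illustrative):

```python
import math

def axial_capacity(Ag, slenderness, E=29000.0, Fy=50.0, phi=0.9):
    """phi*Pn per AISC 360 Eq. E3-2/E3-3 (kips, Ag in in^2, stresses in ksi)."""
    Fe = math.pi**2 * E / slenderness**2
    if slenderness <= 4.71 * math.sqrt(E / Fy):
        Fcr = 0.658 ** (Fy / Fe) * Fy     # inelastic buckling
    else:
        Fcr = 0.877 * Fe                  # elastic buckling
    return phi * Fcr * Ag

# KL/r = 80 < 4.71*sqrt(E/Fy) = 113.4, Fe = 44.7 ksi,
# Fcr = 0.658**(50/44.7)*50 = 31.3 ksi, phi*Pn = 0.9*31.3*20 ~ 564 kips
print(round(axial_capacity(Ag=20.0, slenderness=80.0)))
```
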
@@ -148,10 +148,10 @@ def check_shear_strength(self, steel):
:param steel: a class defined in "steel_material.py" file.
:return: a float number denoting shear strength
and a boolean variable denoting whether shear strength is enough.
- """
- Cv = 1.0
+ """ # noqa: D205, D401, D404
+ Cv = 1.0 # noqa: N806
# Compute nominal shear strength
- Vn = 0.6 * steel.Fy * (self.section['tw'] * self.section['d']) * Cv
+ Vn = 0.6 * steel.Fy * (self.section['tw'] * self.section['d']) * Cv # noqa: N806
phi = 0.9
# Store the shear strength into "strength" dictionary
self.strength['shear'] = phi * Vn
@@ -166,7 +166,7 @@ def check_flexural_strength(self, steel):
:param steel: a class defined in "steel_material.py" file.
:return: a float number denoting the flexural strength
and a boolean denoting whether flexural strength is enough.
- """
+ """ # noqa: D205, D401, D404
# Compute the distance between center lines of top and bottom flanges
h0 = self.section['d'] - self.section['tf']
# Determine coefficient c based on whether it is a "W" section
@@ -175,37 +175,37 @@ def check_flexural_strength(self, steel):
else:
c = h0 / 2 * np.sqrt(self.section['Iy'] / self.section['Cw'])
# Compute Lp and Lr, both of which are necessary to determine flexural strength
- Lp = 1.76 * self.section['ry'] * np.sqrt(steel.E / steel.Fy)
+ Lp = 1.76 * self.section['ry'] * np.sqrt(steel.E / steel.Fy) # noqa: N806
temp1 = np.sqrt(
(self.section['J'] * c / (self.section['Sx'] * h0)) ** 2
+ 6.76 * (0.7 * steel.Fy / steel.E) ** 2
)
temp2 = np.sqrt(self.section['J'] * c / (self.section['Sx'] * h0) + temp1)
- Lr = 1.95 * self.section['rts'] * steel.E / (0.7 * steel.Fy) * temp2
+ Lr = 1.95 * self.section['rts'] * steel.E / (0.7 * steel.Fy) * temp2 # noqa: N806
# Unbraced length
- Lb = min([self.unbraced_length['x'], self.unbraced_length['y']])
+ Lb = min([self.unbraced_length['x'], self.unbraced_length['y']]) # noqa: N806
# Compute moment capacity governed by plastic yielding
- Mp = steel.Fy * self.section['Zx']
+ Mp = steel.Fy * self.section['Zx'] # noqa: N806
# Compute MA, MB, and MC coefficients, all of which are necessary to compute Cb coefficient
# See page 16.1-46 in Seismic Design Manual
- M_max = np.max(
+ M_max = np.max( # noqa: N806
[abs(self.demand['moment bottom']), abs(self.demand['moment top'])]
)
linear_function = interpolate.interp1d(
[0, 1], [self.demand['moment bottom'], (-1) * self.demand['moment top']]
)
- [MA, MB, MC] = np.abs(linear_function([0.25, 0.50, 0.75]))
- Cb = 12.5 * M_max / (2.5 * M_max + 3 * MA + 4 * MB + 3 * MC)
+ [MA, MB, MC] = np.abs(linear_function([0.25, 0.50, 0.75])) # noqa: N806
+ Cb = 12.5 * M_max / (2.5 * M_max + 3 * MA + 4 * MB + 3 * MC) # noqa: N806
# Calculate moment capacity based on unbraced length: case-by-case analysis
# Case I: flexural strength is governed by plastic yielding
# Case II: flexural strength is governed by lateral torsional buckling with Lp < Lb <= Lr
# Case III: flexural strength is governed by lateral torsional buckling with Lb > Lr
if Lb <= Lp:
- Mn = Mp
+ Mn = Mp # noqa: N806
elif Lb <= Lr:
- Mn = Cb * (
+ Mn = Cb * ( # noqa: N806
Mp
- (Mp - 0.7 * steel.Fy * self.section['Sx']) * (Lb - Lp) / (Lr - Lp)
)
@@ -215,10 +215,10 @@ def check_flexural_strength(self, steel):
/ (self.section['Sx'] * h0)
* (Lb / self.section['rts']) ** 2
)
- Fcr = Cb * np.pi**2 * steel.E / ((Lb / self.section['rts']) ** 2) * temp
- Mn = Fcr * self.section['Sx']
+ Fcr = Cb * np.pi**2 * steel.E / ((Lb / self.section['rts']) ** 2) * temp # noqa: N806
+ Mn = Fcr * self.section['Sx'] # noqa: N806
# Attention: no matter which case governs, the flexural strength cannot exceed the plastic moment capacity
- Mn = np.min([Mn, Mp])
+ Mn = np.min([Mn, Mp]) # noqa: N806
# Store the flexural strength into "strength" dictionary
phi = 0.9
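
The Cb computation above samples an assumed linear moment diagram at the quarter points; for equal end moments in double curvature it recovers the familiar Cb of about 2.27. A quick standalone check mirroring that interpolation:

```python
import numpy as np
from scipy import interpolate

M_bot, M_top = 100.0, 100.0            # equal end moments, double curvature
line = interpolate.interp1d([0, 1], [M_bot, -M_top])
MA, MB, MC = np.abs(line([0.25, 0.50, 0.75]))   # 50.0, 0.0, 50.0
M_max = max(abs(M_bot), abs(M_top))
Cb = 12.5 * M_max / (2.5 * M_max + 3 * MA + 4 * MB + 3 * MC)
print(round(Cb, 2))                     # 2.27
```
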
@@ -232,21 +232,21 @@ def check_flexural_strength(self, steel):
def check_combined_loads(self):
"""This method is whether the strength is sufficient for column subjected to combined loading.
:return: a boolean variable denoting whether the strength is sufficient under combined loading.
- """
+ """ # noqa: D205, D401, D404
# Obtain the axial capacity and moment capacity
phi = 0.9
- Pc = self.strength['axial'] / phi
- Mcx = self.strength['flexural'] / phi
- Pr = self.demand['axial']
+ Pc = self.strength['axial'] / phi # noqa: N806
+ Mcx = self.strength['flexural'] / phi # noqa: N806
+ Pr = self.demand['axial'] # noqa: N806
# Determine the governing moment:
# Maximum value from moments at two ends
- Mrx = np.max(
+ Mrx = np.max( # noqa: N806
[abs(self.demand['moment bottom']), abs(self.demand['moment top'])]
)
# Case-by-case analysis:
# Case I: axial load ratio is greater than or equal to 0.2 (AISC 360 Eq. H1-1a)
# Case II: axial load ratio is less than 0.2 (AISC 360 Eq. H1-1b)
- if Pr / Pc <= 0.2:
+ if Pr / Pc >= 0.2: # noqa: PLR2004
combination = Pr / Pc + 8 / 9 * (Mrx / Mcx)
else:
combination = Pr / (2 * Pc) + (Mrx / Mcx)
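
These are the AISC 360 Chapter H interaction checks; per the spec, H1-1a applies when Pr/Pc >= 0.2 and H1-1b below that. A standalone numeric sketch (demands and capacities are illustrative):

```python
def interaction_ratio(Pr, Pc, Mrx, Mcx):
    """AISC 360 Eq. H1-1a/H1-1b combined-loading demand-capacity ratio."""
    if Pr / Pc >= 0.2:
        return Pr / Pc + 8.0 / 9.0 * (Mrx / Mcx)   # H1-1a
    return Pr / (2.0 * Pc) + Mrx / Mcx             # H1-1b

# Pr/Pc = 0.3, Mrx/Mcx = 0.6 -> 0.3 + (8/9)*0.6 = 0.83 <= 1.0, passes
print(interaction_ratio(Pr=300, Pc=1000, Mrx=240, Mcx=400))
```
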
@@ -259,17 +259,17 @@ def check_combined_loads(self):
def check_flag(self):
"""This method is used check whether the column passes all checks.
:return: a boolean variable indicating whether column is feasible or not.
- """
+ """ # noqa: D205, D401, D404
self.flag = True
- for key in self.is_feasible.keys():
- if self.is_feasible[key] == False:
+ for key in self.is_feasible.keys(): # noqa: SIM118
+ if self.is_feasible[key] == False: # noqa: E712
self.flag = False
return self.flag
def compute_demand_capacity_ratio(self):
"""This method is used to calculate the demand to capacity ratios for column components
:return: a dictionary which includes ratios for axial force, shear force, flexural moment, and combined loading.
- """
+ """ # noqa: D205, D401, D404
self.demand_capacity_ratio['axial'] = (
self.demand['axial'] / self.strength['axial']
)
@@ -284,7 +284,7 @@ def compute_demand_capacity_ratio(self):
def calculate_hinge_parameters(self, steel):
"""This method is used to compute the modeling parameters for plastic hinge using modified IMK material model.
:return: a dictionary including the parameters required for nonlinear modeling in OpenSees.
- """
+ """ # noqa: D205, D401, D404
# Following content is based on the following reference:
# [1] Hysteretic models that incorporate strength and stiffness deterioration
# [2] Deterioration modeling of steel components in support of collapse prediction of steel moment frames under
@@ -304,18 +304,18 @@ def calculate_hinge_parameters(self, steel):
# Note that for column, the unbraced length is the column length itself.
# units: kips, inches
# Note that column unbraced length is in feet, remember to convert it to inches
- c1 = 25.4 # c1_unit
- c2 = 6.895 # c2_unit
+ c1 = 25.4 # c1_unit # noqa: F841
+ c2 = 6.895 # c2_unit # noqa: F841
h = self.section['d'] - 2 * self.section['tf'] # Web depth
# Capping moment to yielding moment ratio. Lignos et al. used 1.05 whereas Prof. Burton used 1.11.
- McMy = (
+ McMy = ( # noqa: N806
12.5
* (h / self.section['tw']) ** (-0.2)
* (self.unbraced_length['x'] * 12.0 / self.section['ry']) ** (-0.4)
* (1 - self.demand_capacity_ratio['axial']) ** (0.4)
)
- McMy = max(McMy, 1.0)
- McMy = min(McMy, 1.3)
+ McMy = max(McMy, 1.0) # noqa: N806
+ McMy = min(McMy, 1.3) # noqa: N806
# Beam component rotational stiffness
self.plastic_hinge['K0'] = (
6 * steel.E * self.section['Ix'] / (self.unbraced_length['x'] * 12.0)
@@ -323,7 +323,7 @@ def calculate_hinge_parameters(self, steel):
# Flexural strength
self.plastic_hinge['Myp'] = self.section['Zx'] * steel.Fy
# Effective flexural strength
- if self.demand_capacity_ratio['axial'] <= 0.2:
+ if self.demand_capacity_ratio['axial'] <= 0.2: # noqa: PLR2004
self.plastic_hinge['My'] = (
1.15
* steel.Ry
@@ -340,7 +340,7 @@ def calculate_hinge_parameters(self, steel):
* (1 - self.demand_capacity_ratio['axial'])
)
# Reference cumulative plastic rotation:
- if self.demand_capacity_ratio['axial'] <= 0.35:
+ if self.demand_capacity_ratio['axial'] <= 0.35: # noqa: PLR2004
self.plastic_hinge['Lambda'] = (
255000
* (h / self.section['tw']) ** (-2.14)
diff --git a/modules/createSAM/AutoSDA/connection_part.py b/modules/createSAM/AutoSDA/connection_part.py
index 235fe3118..0483cc614 100644
--- a/modules/createSAM/AutoSDA/connection_part.py
+++ b/modules/createSAM/AutoSDA/connection_part.py
@@ -1,4 +1,4 @@
-# This file is used to define the class of beam-column connection, which includes beam/column depth
+# This file is used to define the class of beam-column connection, which includes beam/column depth # noqa: INP001, D100
# check, RBS dimensions, moment capacity at column face, strong-column-weak-beam check, and panel zone
# thickness (doubler plate)
@@ -39,7 +39,7 @@ class Connection:
(8) Check shear strength of beam
(9) Check whether strong column weak beam is satisfied
(10) Calculate doubler plate thickness
- """
+ """ # noqa: D205, D400, D404
def __init__(
self,
@@ -71,7 +71,7 @@ def __init__(
upper story of the connection.
:param bottom_column: a class defined in "column_component.py" file which refers to the column in
lower story of the connection.
- """
+ """ # noqa: D205, D401, D404
self.connection_type = connection_type
# The dictionary used to store the RBS dimensions
self.left_RBS_dimension = {}
@@ -125,7 +125,7 @@ def check_column_beam(
:return: a boolean result stored in is_feasible dictionary.
In practice, this check should always pass because all beam and column members are selected from a
database from which non-prequalified sizes have been removed.
- """
+ """ # noqa: D205, D401, D404
# Extract the beam depth and weight
if connection_type == 'typical exterior':
# Connection only has one beam and two columns
@@ -136,10 +136,10 @@ def check_column_beam(
bottom_column.section['section size']
)
if (
- left_beam_depth <= 36
- and left_beam_weight <= 300
- and top_column_depth <= 36
- and bottom_column_depth <= 36
+ left_beam_depth <= 36 # noqa: PLR2004
+ and left_beam_weight <= 300 # noqa: PLR2004
+ and top_column_depth <= 36 # noqa: PLR2004
+ and bottom_column_depth <= 36 # noqa: PLR2004
):
self.is_feasible['geometry limits'] = True
else:
@@ -160,9 +160,9 @@ def check_column_beam(
bottom_column.section['section size']
)
if (
- left_beam_depth <= 36
- and left_beam_weight <= 300
- and bottom_column_depth <= 36
+ left_beam_depth <= 36 # noqa: PLR2004
+ and left_beam_weight <= 300 # noqa: PLR2004
+ and bottom_column_depth <= 36 # noqa: PLR2004
):
self.is_feasible['geometry limits'] = True
else:
@@ -181,12 +181,12 @@ def check_column_beam(
bottom_column.section['section size']
)
if (
- left_beam_depth <= 36
- and right_beam_depth <= 36
- and left_beam_weight <= 300
- and right_beam_weight <= 300
- and top_column_depth <= 36
- and bottom_column_depth <= 36
+ left_beam_depth <= 36 # noqa: PLR2004
+ and right_beam_depth <= 36 # noqa: PLR2004
+ and left_beam_weight <= 300 # noqa: PLR2004
+ and right_beam_weight <= 300 # noqa: PLR2004
+ and top_column_depth <= 36 # noqa: PLR2004
+ and bottom_column_depth <= 36 # noqa: PLR2004
):
self.is_feasible['geometry limits'] = True
else:
@@ -204,11 +204,11 @@ def check_column_beam(
bottom_column.section['section size']
)
if (
- left_beam_depth <= 36
- and right_beam_depth <= 36
- and left_beam_weight <= 300
- and right_beam_weight <= 300
- and bottom_column_depth <= 36
+ left_beam_depth <= 36 # noqa: PLR2004
+ and right_beam_depth <= 36 # noqa: PLR2004
+ and left_beam_weight <= 300 # noqa: PLR2004
+ and right_beam_weight <= 300 # noqa: PLR2004
+ and bottom_column_depth <= 36 # noqa: PLR2004
):
self.is_feasible['geometry limits'] = True
else:
@@ -226,15 +226,15 @@ def extract_reduced_beam_section(self, connection_type, left_beam, right_beam):
"""This method is used to extract the RBS dimensions into one (or two) dictionary.
The explanations for input arguments are presented in __init__() function.
:return: one (two) dictionary which contains the RBS dimensions.
- """
+ """ # noqa: D205, D401, D404
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
# The connection only has one beam in this case
self.left_RBS_dimension = copy.deepcopy(left_beam.RBS_dimension)
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
# The connection has two beams at both sides
@@ -246,7 +246,7 @@ def extract_reduced_beam_section(self, connection_type, left_beam, right_beam):
)
sys.exit(2)
- def compute_probable_moment_RBS(
+ def compute_probable_moment_RBS( # noqa: N802
self,
connection_type,
steel,
@@ -255,30 +255,30 @@ def compute_probable_moment_RBS(
):
"""This method is used to compute section modulus at RBS center (step 2 and 3 in ANSI Section 5.8)
:return: a dictionary which includes the probable moment at RBS center
- """
- Cpr = (steel.Fy + steel.Fu) / (2 * steel.Fy)
- Cpr = min(1.2, Cpr)
+ """ # noqa: D205, D400, D401, D404
+ Cpr = (steel.Fy + steel.Fu) / (2 * steel.Fy) # noqa: N806
+ Cpr = min(1.2, Cpr) # noqa: N806
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
- left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension[
+ left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension[ # noqa: N806
'c'
] * left_beam.section['tf'] * (
left_beam.section['d'] - left_beam.section['tf']
)
self.moment['Mpr1'] = Cpr * steel.Ry * steel.Fy * left_Z_RBS
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
- left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension[
+ left_Z_RBS = left_beam.section['Zx'] - 2 * left_beam.RBS_dimension[ # noqa: N806
'c'
] * left_beam.section['tf'] * (
left_beam.section['d'] - left_beam.section['tf']
)
self.moment['Mpr1'] = Cpr * steel.Ry * steel.Fy * left_Z_RBS
- right_Z_RBS = right_beam.section['Zx'] - 2 * right_beam.RBS_dimension[
+ right_Z_RBS = right_beam.section['Zx'] - 2 * right_beam.RBS_dimension[ # noqa: N806
'c'
] * right_beam.section['tf'] * (
right_beam.section['d'] - right_beam.section['tf']
@@ -290,7 +290,7 @@ def compute_probable_moment_RBS(
)
sys.exit(2)
- def compute_shear_force_RBS(
+ def compute_shear_force_RBS( # noqa: N802
self,
connection_type,
beam_dead_load,
@@ -300,7 +300,7 @@ def compute_shear_force_RBS(
):
"""This method calculates the shear force at the center of RBS (step 4 in ANSI Section 5.8)
:return: a dictionary which includes the shear forces
- """
+ """ # noqa: D205, D400, D401, D404
# Be cautious: beam_dead_load read here is in the unit of lb/ft
# The unit should be converted from lb/ft to kips/inch
wu = (
@@ -308,15 +308,15 @@ def compute_shear_force_RBS(
+ 0.5 * (beam_live_load * 0.001 / 12)
+ 0.2 * 0
)
- Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
- Lh = span * 12.0 - 2 * bottom_column.section['d'] - 2 * Sh
+ Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2 # noqa: N806
+ Lh = span * 12.0 - 2 * bottom_column.section['d'] - 2 * Sh # noqa: N806
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
self.shear_force['VRBS1'] = 2 * self.moment['Mpr1'] / Lh + wu * Lh / 2
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
self.shear_force['VRBS1'] = 2 * self.moment['Mpr1'] / Lh + wu * Lh / 2
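
Steps 2 through 4 of AISC 358 Section 5.8 chain together as: Cpr = (Fy+Fu)/(2Fy) capped at 1.2, Z_RBS = Zx - 2c*tf*(d - tf), Mpr = Cpr*Ry*Fy*Z_RBS, and V_RBS = 2*Mpr/Lh + wu*Lh/2. A standalone numeric sketch with illustrative W24x76-like inputs:

```python
def rbs_probable_shear(Zx, c, tf, d, Lh, wu, Fy=50.0, Fu=65.0, Ry=1.1):
    """Probable moment and shear at the RBS center (AISC 358 Sec. 5.8)."""
    Cpr = min((Fy + Fu) / (2 * Fy), 1.2)          # step 2
    Z_rbs = Zx - 2 * c * tf * (d - tf)            # plastic modulus at RBS cut
    Mpr = Cpr * Ry * Fy * Z_rbs                   # step 3, kip-in
    V_rbs = 2 * Mpr / Lh + wu * Lh / 2            # step 4, kips
    return Mpr, V_rbs

# Zx = 200 in^3, c = 2.25 in, tf = 0.68 in, d = 23.9 in,
# Lh = 300 in between RBS centers, wu = 0.01 kip/in
Mpr, V = rbs_probable_shear(200.0, 2.25, 0.68, 23.9, 300.0, 0.01)
print(round(Mpr), round(V, 1))   # 8156 kip-in, 55.9 kips
```
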
@@ -330,15 +330,15 @@ def compute_shear_force_RBS(
def compute_probable_moment_column_face(self, connection_type):
"""This method calculates the probable maximum moment at the face of the column. (step 5 in ANSI Section 5.8)
:return: Store probable maximum moment at column face into the dictionary
- """
- Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
+ """ # noqa: D205, D400, D401, D404
+ Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2 # noqa: N806
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
self.moment['Mf1'] = self.moment['Mpr1'] + self.shear_force['VRBS1'] * Sh
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
self.moment['Mf1'] = self.moment['Mpr1'] + self.shear_force['VRBS1'] * Sh
@@ -353,14 +353,14 @@ def compute_plastic_moment(self, connection_type, steel, left_beam, right_beam):
"""This method calculates the plastic moment of the beam based on expected yield stress.
(step 6 in ANSI Section 5.8)
:return: Store the plastic moment to the dictionary.
- """
+ """ # noqa: D205, D401, D404
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
self.moment['Mpe1'] = steel.Ry * steel.Fy * left_beam.section['Zx']
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
self.moment['Mpe1'] = steel.Ry * steel.Fy * left_beam.section['Zx']
@@ -375,10 +375,10 @@ def check_moment_column_face(self, connection_type):
"""This method checks whether the plastic moment is greater than the actual moment at column face.
(step 7 in ANSI Section 5.8)
:return: boolean result stored in is_feasible dictionary.
- """
+ """ # noqa: D205, D401, D404
phi_d = 1.0
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
if phi_d * self.moment['Mpe1'] >= self.moment['Mf1']:
@@ -389,7 +389,7 @@ def check_moment_column_face(self, connection_type):
)
self.is_feasible['flexural strength'] = False
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
if (
@@ -419,15 +419,15 @@ def check_shear_strength(
"""This method checks whether the beam shear strength is sufficient for the required shear strength.
(step 8 in ANSI Section 5.8)
:return: boolean result stored in is_feasible dictionary.
- """
+ """ # noqa: D205, D401, D404
wu = (
1.2 * (beam_dead_load * 0.001 / 12)
+ 0.5 * (beam_live_load * 0.001 / 12)
+ 0.2 * 0
)
- Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2
+ Sh = self.left_RBS_dimension['a'] + self.left_RBS_dimension['b'] / 2 # noqa: N806
if (
- connection_type == 'typical exterior'
+ connection_type == 'typical exterior' # noqa: PLR1714
or connection_type == 'top exterior'
):
self.shear_force['Vu1'] = self.shear_force['VRBS1'] + wu * Sh
@@ -437,7 +437,7 @@ def check_shear_strength(
sys.stderr.write('Shear strength is not sufficient!\n')
self.is_feasible['shear strength'] = False
elif (
- connection_type == 'typical interior'
+ connection_type == 'typical interior' # noqa: PLR1714
or connection_type == 'top interior'
):
self.shear_force['Vu1'] = self.shear_force['VRBS1'] + wu * Sh
@@ -456,7 +456,7 @@ def check_shear_strength(
)
sys.exit(2)
- def check_column_beam_relationships(
+ def check_column_beam_relationships( # noqa: C901
self,
connection_type,
steel,
@@ -468,21 +468,21 @@ def check_column_beam_relationships(
"""This method examines whether the "strong-column-weak-beam" criteria is satisfied.
(step 11 in ANSI Section 5.8)
:return: boolean result stored in is_feasible dictionary.
- """
+ """ # noqa: D205, D401, D404
if connection_type == 'top exterior':
# For column in one-story building or top story:
# Strong column weak beam is exempted if the column axial load ratio < 0.3 for all load combinations except
# those using amplified seismic load.
# If not the case, still need to check the Mpc/Mpb ratio.
- if bottom_column.demand['axial'] / bottom_column.strength['axial'] < 0.3:
+ if bottom_column.demand['axial'] / bottom_column.strength['axial'] < 0.3: # noqa: PLR2004
self.is_feasible['SCWB'] = True
else:
- Puc_bot = bottom_column.demand['axial']
- Ag_bot = bottom_column.section['A']
+ Puc_bot = bottom_column.demand['axial'] # noqa: N806
+ Ag_bot = bottom_column.section['A'] # noqa: N806
ht_bot = (
bottom_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
- Zx_bot = bottom_column.section['Zx']
+ Zx_bot = bottom_column.section['Zx'] # noqa: N806
db = left_beam.section['d']
# Compute the moment summation for column
self.moment['Mpc'] = (
@@ -513,15 +513,15 @@ def check_column_beam_relationships(
# Strong column weak beam is exempted if the column axial load ratio < 0.3 for all load combinations except
# those using amplified seismic load.
# If not the case, still need to check the Mpc/Mpb ratio.
- if bottom_column.demand['axial'] / bottom_column.strength['axial'] < 0.3:
+ if bottom_column.demand['axial'] / bottom_column.strength['axial'] < 0.3: # noqa: PLR2004
self.is_feasible['SCWB'] = True
else:
- Puc_bot = bottom_column.demand['axial']
- Ag_bot = bottom_column.section['A']
+ Puc_bot = bottom_column.demand['axial'] # noqa: N806
+ Ag_bot = bottom_column.section['A'] # noqa: N806
h_bot = (
bottom_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
- Zx_bot = bottom_column.section['Zx']
+ Zx_bot = bottom_column.section['Zx'] # noqa: N806
# Generally the left and right beams have identical sizes
db = (left_beam.section['d'] + right_beam.section['d']) / 2
# Compute the moment summation for column
@@ -554,18 +554,18 @@ def check_column_beam_relationships(
self.is_feasible['SCWB'] = False
elif connection_type == 'typical exterior':
# This connection has two columns and one beam
- Puc_top = top_column.demand['axial']
- Puc_bot = bottom_column.demand['axial']
- Ag_top = top_column.section['A']
- Ag_bot = bottom_column.section['A']
+ Puc_top = top_column.demand['axial'] # noqa: N806
+ Puc_bot = bottom_column.demand['axial'] # noqa: N806
+ Ag_top = top_column.section['A'] # noqa: N806
+ Ag_bot = bottom_column.section['A'] # noqa: N806
ht_top = (
top_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
ht_bot = (
bottom_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
- Zx_top = top_column.section['Zx']
- Zx_bot = bottom_column.section['Zx']
+ Zx_top = top_column.section['Zx'] # noqa: N806
+ Zx_bot = bottom_column.section['Zx'] # noqa: N806
db = left_beam.section['d']
# Compute the moment summation for column
self.moment['Mpc'] = Zx_top * (steel.Fy - Puc_top / Ag_top) * (
@@ -589,18 +589,18 @@ def check_column_beam_relationships(
self.is_feasible['SCWB'] = False
elif connection_type == 'typical interior':
# This connection has two columns and two beams
- Puc_top = top_column.demand['axial']
- Puc_bot = bottom_column.demand['axial']
- Ag_top = top_column.section['A']
- Ag_bot = bottom_column.section['A']
+ Puc_top = top_column.demand['axial'] # noqa: N806
+ Puc_bot = bottom_column.demand['axial'] # noqa: N806
+ Ag_top = top_column.section['A'] # noqa: N806
+ Ag_bot = bottom_column.section['A'] # noqa: N806
h_top = (
top_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
h_bot = (
bottom_column.unbraced_length['x'] * 12.0
) # Be cautious: convert the unit from ft to inch
- Zx_top = top_column.section['Zx']
- Zx_bot = bottom_column.section['Zx']
+ Zx_top = top_column.section['Zx'] # noqa: N806
+ Zx_bot = bottom_column.section['Zx'] # noqa: N806
# Generally the left and right beams have identical sizes
db = (left_beam.section['d'] + right_beam.section['d']) / 2
# Compute the moment summation for column
@@ -644,7 +644,7 @@ def determine_doubler_plate(
):
"""This method determines the panel zone thickness (doubler plates).
:return: a scalar which denotes the doubler plate thickness.
- """
+ """ # noqa: D205, D401, D404
if connection_type == 'top exterior':
# Connection has one left beam and one bottom column
h_bot = (
@@ -734,10 +734,10 @@ def determine_doubler_plate(
def check_flag(self):
"""This method is used to test whether the connection passed all checks.
:return: a boolean variable indicating the connection is feasible or note.
- """
+ """ # noqa: D205, D401, D404
# Loop over each checking result to see if it is feasible or not
self.flag = True
- for key in self.is_feasible.keys():
- if self.is_feasible[key] == False:
+ for key in self.is_feasible.keys(): # noqa: SIM118
+ if self.is_feasible[key] == False: # noqa: E712
self.flag = False
return self.flag
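
All four branches of check_column_beam_relationships implement the same test: the axial-load-reduced column plastic moments, summed above and below the joint, must exceed the summed beam moments projected to the column centerline (ratio of at least 1.0). A simplified standalone sketch that drops the geometric projection factors the class applies:

```python
def scwb_ratio(columns, beams, Fy=50.0):
    """Sum Zx*(Fy - Puc/Ag) over columns vs. summed beam moments.

    columns: iterable of (Zx, Puc, Ag) tuples; beams: iterable of Mpb (kip-in).
    """
    sum_mpc = sum(Zx * (Fy - Puc / Ag) for Zx, Puc, Ag in columns)
    sum_mpb = sum(beams)
    return sum_mpc / sum_mpb

# Top + bottom column at an interior joint vs. two beam moments (illustrative):
ratio = scwb_ratio(columns=[(224, 150, 20.1), (224, 300, 20.1)],
                   beams=[7800, 7800])
print(round(ratio, 2))   # 1.11 -> SCWB satisfied
```
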
diff --git a/modules/createSAM/AutoSDA/elastic_analysis.py b/modules/createSAM/AutoSDA/elastic_analysis.py
index 33947b0cb..772000f99 100644
--- a/modules/createSAM/AutoSDA/elastic_analysis.py
+++ b/modules/createSAM/AutoSDA/elastic_analysis.py
@@ -1,4 +1,4 @@
-# This file is used to include all user defined classes and functions
+# This file is used to include all user defined classes and functions # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in June 2018
# Updated in Sept. 2018
@@ -37,9 +37,9 @@ class ElasticAnalysis:
(16) gravity and earthquake loads combination
(17) copy baseline .tcl files
(18) run OpenSees.exe
- """
+ """ # noqa: D205, D400, D404
- def __init__(self, building, for_drift_only=False, for_period_only=False):
+ def __init__(self, building, for_drift_only=False, for_period_only=False): # noqa: FBT002
"""This function is used to call all methods to write .tcl files required for an elastic analysis OpenSees model.
:param building: a class defined in "building_information.py" file
:param for_drift_only: a boolean variable.
@@ -48,7 +48,7 @@ def __init__(self, building, for_drift_only=False, for_period_only=False):
:param for_period_only: a boolean variable.
True means we only perform the eigen value analysis to obtain the period
Otherwise, all load types will be considered.
- """
+ """ # noqa: D205, D401, D404
# Change the working directory to folder where .tcl files will be saved
Path(building.directory['building elastic model']).mkdir(
parents=True, exist_ok=True
@@ -79,9 +79,9 @@ def __init__(self, building, for_drift_only=False, for_period_only=False):
# Call method to run OpenSees.exe for performing elastic analysis
self.run_OpenSees_program(building)
- def write_nodes(self, building):
+ def write_nodes(self, building): # noqa: D102
# Create a .tcl file and write the node information
- with open('DefineNodes2DModel.tcl', 'w') as tclfile:
+ with open('DefineNodes2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define all nodes \n'
) # Introduce the file usage
@@ -89,15 +89,15 @@ def write_nodes(self, building):
tclfile.write('# Set bay width and story height \n')
tclfile.write(
- 'set\tBayWidth\t[expr %.2f*12]; \n'
+ 'set\tBayWidth\t[expr %.2f*12]; \n' # noqa: UP031
% (building.geometry['X bay width'])
)
tclfile.write(
- 'set\tFirstStory\t[expr %.2f*12]; \n'
+ 'set\tFirstStory\t[expr %.2f*12]; \n' # noqa: UP031
% (building.geometry['first story height'])
)
tclfile.write(
- 'set\tTypicalStory\t[expr %.2f*12]; \n\n\n'
+ 'set\tTypicalStory\t[expr %.2f*12]; \n\n\n' # noqa: UP031
% (building.geometry['typical story height'])
)
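
For concreteness, with a 20 ft bay, a 13 ft first story, and 10 ft typical stories, the three writes above emit tcl of the following form (a sketch reproducing just the format strings):

```python
# Sketch of the text emitted by the three writes above (tabs render as gaps):
geometry = {'X bay width': 20.0, 'first story height': 13.0,
            'typical story height': 10.0}
print('set\tBayWidth\t[expr %.2f*12];' % geometry['X bay width'])
print('set\tFirstStory\t[expr %.2f*12];' % geometry['first story height'])
print('set\tTypicalStory\t[expr %.2f*12];' % geometry['typical story height'])
# -> set  BayWidth      [expr 20.00*12];   (etc.)
```
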
@@ -112,7 +112,7 @@ def write_nodes(self, building):
): # j is column label
tclfile.write('node\t%i%i%i' % (j, i, 1)) # Node label
tclfile.write('\t[expr %i*$BayWidth]' % (j - 1)) # X coordinate
- if i <= 2:
+ if i <= 2: # noqa: PLR2004
tclfile.write(
'\t[expr %i*$FirstStory];' % (i - 1)
) # Y coordinate
@@ -137,7 +137,7 @@ def write_nodes(self, building):
'\t[expr %i*$BayWidth]'
% (building.geometry['number of X bay'] + 1)
) # X coordinate
- if i <= 2:
+ if i <= 2: # noqa: PLR2004
tclfile.write(
'\t[expr %i*$FirstStory]; ' % (i - 1)
) # Y coordinate
@@ -193,9 +193,9 @@ def write_nodes(self, building):
'# puts "Extra nodes for leaning column springs defined"\n'
)
- def write_fixities(self, building):
+ def write_fixities(self, building): # noqa: D102
# Create a .tcl file to write boundary for the model
- with open('DefineFixities2DModel.tcl', 'w') as tclfile:
+ with open('DefineFixities2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define the fixity at all column bases \n\n\n'
)
@@ -209,9 +209,9 @@ def write_fixities(self, building):
)
tclfile.write('# puts "All column base fixities have been defined"')
- def write_floor_constraint(self, building):
+ def write_floor_constraint(self, building): # noqa: D102
# Create a .tcl file to write the floor constraint, i.e., equal DOF
- with open('DefineFloorConstraint2DModel.tcl', 'w') as tclfile:
+ with open('DefineFloorConstraint2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define floor constraint \n\n')
tclfile.write(
'set\tConstrainDOF\t1;\t# Nodes at same floor level have identical lateral displacement \n\n'
@@ -233,9 +233,9 @@ def write_floor_constraint(self, building):
tclfile.write('\t# Pier 1 to Leaning column\n\n')
tclfile.write('# puts "Floor constraint defined"')
- def write_beam(self, building):
+ def write_beam(self, building): # noqa: D102
# Create a .tcl file to write beam elements
- with open('DefineBeams2DModel.tcl', 'w') as tclfile:
+ with open('DefineBeams2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define beam elements \n\n\n')
tclfile.write('# Define beam section sizes \n')
for i in range(2, building.geometry['number of story'] + 2):
@@ -290,9 +290,9 @@ def write_beam(self, building):
tclfile.write('\n')
tclfile.write('# puts "Beams defined"')
- def write_column(self, building):
+ def write_column(self, building): # noqa: D102
# Create a .tcl file to define all column elements
- with open('DefineColumns2DModel.tcl', 'w') as tclfile:
+ with open('DefineColumns2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define columns \n\n\n')
# Define exterior column sizes
@@ -388,9 +388,9 @@ def write_column(self, building):
tclfile.write('\t$AreaRigid\t$Es\t$IRigid\t$PDeltaTransf; \n\n')
tclfile.write('# puts "Columns defined"')
- def write_leaning_column_spring(self, building):
+ def write_leaning_column_spring(self, building): # noqa: D102
# Create a .tcl file to write all rotational springs for leaning column
- with open('DefineLeaningColumnSpring.tcl', 'w') as tclfile:
+ with open('DefineLeaningColumnSpring.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define column hinges \n\n')
for i in range(2, building.geometry['number of story'] + 2):
# Spring below the floor level i
@@ -445,9 +445,9 @@ def write_leaning_column_spring(self, building):
tclfile.write('\n')
tclfile.write('# puts "Leaning column springs defined"')
- def write_mass(self, building):
+ def write_mass(self, building): # noqa: D102
# Create a .tcl file to write nodal mass
- with open('DefineMasses2DModel.tcl', 'w') as tclfile:
+ with open('DefineMasses2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define all nodal masses \n\n')
# Write values for floor weights, tributary mass ratio, and nodal mass
@@ -496,9 +496,9 @@ def write_mass(self, building):
'# puts "Nodal mass defined"'
) # Write puts command which denotes the ending of the .tcl file
- def write_all_recorder(self):
+ def write_all_recorder(self): # noqa: D102
# Create a .tcl file to write all recorders for output
- with open('DefineAllRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineAllRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define all recorders \n\n\n'
) # File explanation
@@ -534,9 +534,9 @@ def write_all_recorder(self):
tclfile.write('cd\t$baseDir\n')
tclfile.write('# puts "All recorders defined"')
- def write_story_drift_recorder(self, building):
+ def write_story_drift_recorder(self, building): # noqa: D102
# Create a .tcl file to write story drift recorder for output
- with open('DefineStoryDriftRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineStoryDriftRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define story drift recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/StoryDrifts\n\n')
@@ -562,9 +562,9 @@ def write_story_drift_recorder(self, building):
)
tclfile.write('\t-dof\t1\t-perpDirn\t2; \n')
- def write_node_displacement_recorder(self, building):
+ def write_node_displacement_recorder(self, building): # noqa: D102
# Create a .tcl file to write node displacement recorder for output
- with open('DefineNodeDisplacementRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineNodeDisplacementRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define node displacement recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/NodeDisplacements\n\n')
# Write the node displacement recorder for node at each floor level
@@ -576,9 +576,9 @@ def write_node_displacement_recorder(self, building):
tclfile.write('\t%i%i%i' % (j, i, 1))
tclfile.write('\t-dof\t1\t2\t3\tdisp; \n')
- def write_beam_force_recorder(self, building):
+ def write_beam_force_recorder(self, building): # noqa: D102
# Create a .tcl file to write beam force recorder for output
- with open('DefineGlobalBeamForceRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineGlobalBeamForceRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define global beam force recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/GlobalBeamForces\n\n')
@@ -592,9 +592,9 @@ def write_beam_force_recorder(self, building):
tclfile.write('\t%i%i%i%i%i%i%i' % (2, j, i, 1, j + 1, i, 1))
tclfile.write('\tforce; \n')
- def write_column_force_recorder(self, building):
+ def write_column_force_recorder(self, building): # noqa: D102
# Create a .tcl file to write column force recorder for output
- with open('DefineGlobalColumnForceRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineGlobalColumnForceRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define global column force recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/GlobalColumnForces\n\n')
@@ -610,9 +610,9 @@ def write_column_force_recorder(self, building):
tclfile.write('\t%i%i%i%i%i%i%i' % (3, j, i, 1, j, i + 1, 1))
tclfile.write('\tforce;\n')
- def write_gravity_dead_load(self, building):
+ def write_gravity_dead_load(self, building): # noqa: D102
# Create a .tcl file that writes the gravity dead load on the model
- with open('DefineGravityDeadLoads2DModel.tcl', 'w') as tclfile:
+ with open('DefineGravityDeadLoads2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define gravity dead loads\n\n\n')
# Assign the beam dead load values
@@ -711,9 +711,9 @@ def write_gravity_dead_load(self, building):
tclfile.write('# puts "Dead load defined"')
- def write_gravity_live_load(self, building):
+ def write_gravity_live_load(self, building): # noqa: D102
# Create a .tcl file to write live load
- with open('DefineGravityLiveLoads2DModel.tcl', 'w') as tclfile:
+ with open('DefineGravityLiveLoads2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define gravity live loads\n\n\n')
# Assign the beam live load values
@@ -812,9 +812,9 @@ def write_gravity_live_load(self, building):
tclfile.write('# puts "Live load defined"')
- def write_earthquake_load(self, building):
+ def write_earthquake_load(self, building): # noqa: D102
# Create a .tcl file to write earthquake load
- with open('DefineEarthquakeLaterLoads2DModel.tcl', 'w') as tclfile:
+ with open('DefineEarthquakeLaterLoads2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define earthquake lateral loads\n\n\n')
# Assign the beam dead load values
@@ -899,10 +899,10 @@ def write_earthquake_load(self, building):
tclfile.write('\n}\n')
tclfile.write('# puts "Earthquake load defined"')
- def write_gravity_earthquake_load(self, building):
+ def write_gravity_earthquake_load(self, building): # noqa: D102
# Create a .tcl file to write the combination of earthquake and gravity loads
# This load case is used to calculate story drift
- with open('DefineGravityEarthquakeLoads2DModel.tcl', 'w') as tclfile:
+ with open('DefineGravityEarthquakeLoads2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define gravity and earthquake loads\n\n\n')
# Assign the beam dead load values
@@ -1019,7 +1019,7 @@ def write_gravity_earthquake_load(self, building):
def copy_baseline_files(self, building, for_drift_only, for_period_only):
"""Some .tcl files are fixed, i.e., no need to change for different OpenSees models.
Therefore, just copy these .tcl files from the baseline folder
- """
+ """ # noqa: D205, D400, D401
# define a list which includes all baseline files' names
file_list = [
'Database.csv',
@@ -1051,21 +1051,21 @@ def copy_baseline_files(self, building, for_drift_only, for_period_only):
)
new_string_for_drift = '[list GravityEarthquake]'
if for_drift_only:
- with open('Model.tcl') as file:
+ with open('Model.tcl') as file: # noqa: PTH123
content = file.read()
new_content = content.replace(old_string, new_string_for_drift)
- with open('Model.tcl', 'w') as file:
+ with open('Model.tcl', 'w') as file: # noqa: PTH123
file.write(new_content)
# Revise "Model.tcl" file if we only want to obtain period
new_string_for_period = '[list EigenValue]'
if for_period_only:
- with open('Model.tcl') as file:
+ with open('Model.tcl') as file: # noqa: PTH123
content = file.read()
new_content = content.replace(old_string, new_string_for_period)
- with open('Model.tcl', 'w') as file:
+ with open('Model.tcl', 'w') as file: # noqa: PTH123
file.write(new_content)
- def run_OpenSees_program(self, building):
+ def run_OpenSees_program(self, building): # noqa: ARG002, N802, D102
# This method runs the OpenSees program on Model.tcl to perform the elastic analysis.
cmd = 'OpenSees Model.tcl'
- subprocess.Popen(cmd, shell=True).wait()
+ subprocess.Popen(cmd, shell=True).wait() # noqa: S602
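
The S602 suppression flags `shell=True`; since the command is a fixed two-token string, an argument-list form would avoid the shell entirely. A possible alternative spelling (a sketch, assuming the OpenSees executable is on PATH):

```python
import subprocess

# Equivalent to the Popen(...).wait() call above, without invoking a shell;
# check=True raises CalledProcessError if OpenSees exits nonzero.
subprocess.run(['OpenSees', 'Model.tcl'], check=True)
```
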
diff --git a/modules/createSAM/AutoSDA/elastic_output.py b/modules/createSAM/AutoSDA/elastic_output.py
index d57eb90f1..a635e15df 100644
--- a/modules/createSAM/AutoSDA/elastic_output.py
+++ b/modules/createSAM/AutoSDA/elastic_output.py
@@ -1,4 +1,4 @@
-# This file is used to define the class of Building
+# This file is used to define the class of Building # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in Aug. 2018
# Updated on Sept. 28 2018
@@ -36,7 +36,7 @@ class ElasticOutput:
Load combination #5: (0.9 - 0.2SDS)D + rho*E
Load combination #6: (0.9 - 0.2SDS)D - rho*E
(5) Determine governing load cases
- """
+ """ # noqa: D205, D400, D404
def __init__(self, building):
# Initialize attributes of elastic_output class
@@ -65,7 +65,7 @@ def read_raw_load(self, building):
dead load, live load or earthquake load
:param building: user-defined class in "building_information.py" file
:return: a dictionary which contains load demands under three load scenarios
- """
+ """ # noqa: D205, D400, D401, D404
for load_type in LOAD_TYPE:
# Define the directory where the column force output is stored
path_output = (
@@ -119,10 +119,10 @@ def read_raw_load(self, building):
# Store beam forces based on load scenario
self.raw_beam_load[load_type] = beam_load
- def extract_column_load(self):
+ def extract_column_load(self): # noqa: D102
# Extract axial force, shear force, and moment from the variable obtained in the previous step
# Forces at both ends of columns are stored
- N = self.raw_column_load['DeadLoad'].shape[1]
+ N = self.raw_column_load['DeadLoad'].shape[1] # noqa: N806
axial_index = range(
1, N, 3
) # In column matrix, axial force is in column #2, 5, 8, ...
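
The `range(start, N, 3)` index arrays pick every third column out of the OpenSees force matrix, which repeats axial, shear, moment per node. A tiny demo of the slicing pattern:

```python
import numpy as np

# Fake force history: 2 time steps x 6 columns = [P1, V1, M1, P2, V2, M2]
raw = np.arange(12).reshape(2, 6)
N = raw.shape[1]
axial = raw[:, list(range(1, N, 3))]   # 1-based columns #2, #5, ...
print(axial)                           # [[ 1  4]
                                       #  [ 7 10]]
```
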
@@ -156,10 +156,10 @@ def extract_column_load(self):
'column moment': moment,
}
- def extract_beam_load(self):
+ def extract_beam_load(self): # noqa: D102
# Extract shear and moment from variables obtained in previous step
# Forces at both ends of beams are stored
- N = self.raw_beam_load['DeadLoad'].shape[1]
+ N = self.raw_beam_load['DeadLoad'].shape[1] # noqa: N806
axial_index = range(
0, N, 3
) # In beam matrix, axial force is in column #1, 4, 7, ...
@@ -187,12 +187,12 @@ def extract_beam_load(self):
self.earthquake_load_case['beam shear'] = shear_force
self.earthquake_load_case['beam moment'] = moment
- def perform_load_combination(self, building):
+ def perform_load_combination(self, building): # noqa: C901
"""This method is used to perform the load combinations, which will be used to extract the dominate load.
There are six load combinations in total according to ASCE 7-10.
:param building: user-defined class in "building_information.py" file
:return: six dictionaries which individually represents a single load combination result.
- """
+ """ # noqa: D205, D401, D404
# Load combination 1: 1.4*D
for force in self.dead_load_case:
self.load_combination_1[force] = 1.4 * self.dead_load_case[force]
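
Each combination is an elementwise scale-and-add over the dead, live, and earthquake dictionaries; the loop above is the 1.4D case. A compact sketch of the general pattern (`combine` is a hypothetical helper, not one of the class methods):

```python
import numpy as np

def combine(dead, live, quake, cd, cl, ce):
    """Elementwise cd*D + cl*L + ce*E for every force key."""
    return {k: cd * dead[k] + cl * live[k] + ce * quake[k] for k in dead}

D = {'beam moment': np.array([100.0])}
L = {'beam moment': np.array([40.0])}
E = {'beam moment': np.array([250.0])}
print(combine(D, L, E, 1.2, 1.6, 0.0))   # 1.2D + 1.6L -> 184.0
```
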
@@ -205,7 +205,7 @@ def perform_load_combination(self, building):
# Load combination 3: (1.2 + 0.2*SDS)*D + 1.0(0.5)*L + rho*E
# For Load combination 3 through 6, omega should be used to replace with rho for column axial force
- SDS = building.elf_parameters['SDS']
+ SDS = building.elf_parameters['SDS'] # noqa: N806
rho = 1.0
omega = 3.0
for force in self.dead_load_case:
@@ -271,11 +271,11 @@ def determine_dominate_load(self):
"""This method is used to determine the governing load for beam and column components.
:return: a dictionary which includes all six keys and associated matrices.
Six keys: column axial, column shear, column moment, beam axial, beam shear, beam moment
- """
+ """ # noqa: D205, D400, D401, D404
dominate_load = {}
# Find the maximum load demand among six load cases
- for force in self.load_combination_1.keys():
- M, N = self.load_combination_1[force].shape
+ for force in self.load_combination_1.keys(): # noqa: SIM118
+ M, N = self.load_combination_1[force].shape # noqa: N806
dominate_load[force] = np.zeros([M, N])
for m in range(M):
for n in range(N):
diff --git a/modules/createSAM/AutoSDA/global_variables.py b/modules/createSAM/AutoSDA/global_variables.py
index 23beaf826..a0a1b0cf8 100644
--- a/modules/createSAM/AutoSDA/global_variables.py
+++ b/modules/createSAM/AutoSDA/global_variables.py
@@ -1,4 +1,4 @@
-# This file is used to declare all global constants.
+# This file is used to declare all global constants. # noqa: INP001, D100
# All user input parameters are summarized here.
# Developed by GUAN, XINGQUAN @ UCLA in Feb 2019
# Be cautious with line 19 - 25:
@@ -25,7 +25,7 @@
) # Unit: ksi
# The path where THIS file is located is the base directory
-baseDirectory = os.path.dirname(os.path.realpath(__file__))
+baseDirectory = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
##########################################################################
# User Defined Ratios Involved in Design #
diff --git a/modules/createSAM/AutoSDA/help_functions.py b/modules/createSAM/AutoSDA/help_functions.py
index b8fb481f8..1c5f8f8c1 100644
--- a/modules/createSAM/AutoSDA/help_functions.py
+++ b/modules/createSAM/AutoSDA/help_functions.py
@@ -1,4 +1,4 @@
-# This file is used to define helpful functions that are used in either main program or user defined class
+# This file is used to define helpful functions that are used in either main program or user defined class # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in June 2018
# Updated in Sept. 2018
@@ -9,95 +9,95 @@
from global_variables import SECTION_DATABASE
-def determine_Fa_coefficient(site_class, Ss):
+def determine_Fa_coefficient(site_class, Ss): # noqa: C901, N802, N803
"""This function is used to determine Fa coefficient, which is based on ASCE 7-10 Table 11.4-1
:param Ss: a scalar given in building class
:param site_class: a string: 'A', 'B', 'C', 'D', or 'E' given in building information
:return: a scalar which is Fa coefficient
- """
+ """ # noqa: D205, D400, D401, D404
if site_class == 'A':
- Fa = 0.8
+ Fa = 0.8 # noqa: N806
elif site_class == 'B':
- Fa = 1.0
+ Fa = 1.0 # noqa: N806
elif site_class == 'C':
- if Ss <= 0.5:
- Fa = 1.2
+ if Ss <= 0.5: # noqa: PLR2004
+ Fa = 1.2 # noqa: N806
elif Ss <= 1.0:
- Fa = 1.2 - 0.4 * (Ss - 0.5)
+ Fa = 1.2 - 0.4 * (Ss - 0.5) # noqa: N806
else:
- Fa = 1.0
+ Fa = 1.0 # noqa: N806
elif site_class == 'D':
- if Ss <= 0.25:
- Fa = 1.6
- elif Ss <= 0.75:
- Fa = 1.6 - 0.8 * (Ss - 0.25)
- elif Ss <= 1.25:
- Fa = 1.2 - 0.4 * (Ss - 0.75)
+ if Ss <= 0.25: # noqa: PLR2004
+ Fa = 1.6 # noqa: N806
+ elif Ss <= 0.75: # noqa: PLR2004
+ Fa = 1.6 - 0.8 * (Ss - 0.25) # noqa: N806
+ elif Ss <= 1.25: # noqa: PLR2004
+ Fa = 1.2 - 0.4 * (Ss - 0.75) # noqa: N806
else:
- Fa = 1.0
+ Fa = 1.0 # noqa: N806
elif site_class == 'E':
- if Ss <= 0.25:
- Fa = 2.5
- elif Ss <= 0.5:
- Fa = 2.5 - 3.2 * (Ss - 0.25)
- elif Ss <= 0.75:
- Fa = 1.7 - 2.0 * (Ss - 0.5)
+ if Ss <= 0.25: # noqa: PLR2004
+ Fa = 2.5 # noqa: N806
+ elif Ss <= 0.5: # noqa: PLR2004
+ Fa = 2.5 - 3.2 * (Ss - 0.25) # noqa: N806
+ elif Ss <= 0.75: # noqa: PLR2004
+ Fa = 1.7 - 2.0 * (Ss - 0.5) # noqa: N806
elif Ss <= 1.0:
- Fa = 1.2 - 1.2 * (Ss - 0.75)
+ Fa = 1.2 - 1.2 * (Ss - 0.75) # noqa: N806
else:
- Fa = 0.9
+ Fa = 0.9 # noqa: N806
else:
- Fa = None
- print('Site class is entered with an invalid value')
+ Fa = None # noqa: N806
+ print('Site class is entered with an invalid value') # noqa: T201
return Fa
-def determine_Fv_coefficient(site_class, S1):
+def determine_Fv_coefficient(site_class, S1): # noqa: C901, N802, N803
"""This function is used to determine Fv coefficient, which is based on ASCE 7-10 Table 11.4-2
:param S1: a scalar given in building class
:param site_class: a string 'A', 'B', 'C', 'D' or 'E' given in building class
:return: a scalar which is Fv coefficient
- """
+ """ # noqa: D205, D400, D401, D404
if site_class == 'A':
- Fv = 0.8
+ Fv = 0.8 # noqa: N806
elif site_class == 'B':
- Fv = 1.0
+ Fv = 1.0 # noqa: N806
elif site_class == 'C':
- if S1 <= 0.1:
- Fv = 1.7
- elif S1 <= 0.5:
- Fv = 1.7 - 1.0 * (S1 - 0.1)
+ if S1 <= 0.1: # noqa: PLR2004
+ Fv = 1.7 # noqa: N806
+ elif S1 <= 0.5: # noqa: PLR2004
+ Fv = 1.7 - 1.0 * (S1 - 0.1) # noqa: N806
else:
- Fv = 1.3
+ Fv = 1.3 # noqa: N806
elif site_class == 'D':
- if S1 <= 0.1:
- Fv = 2.4
- elif S1 <= 0.2:
- Fv = 2.4 - 4 * (S1 - 0.1)
- elif S1 <= 0.4:
- Fv = 2.0 - 2 * (S1 - 0.2)
- elif S1 <= 0.5:
- Fv = 1.6 - 1 * (S1 - 0.4)
+ if S1 <= 0.1: # noqa: PLR2004
+ Fv = 2.4 # noqa: N806
+ elif S1 <= 0.2: # noqa: PLR2004
+ Fv = 2.4 - 4 * (S1 - 0.1) # noqa: N806
+ elif S1 <= 0.4: # noqa: PLR2004
+ Fv = 2.0 - 2 * (S1 - 0.2) # noqa: N806
+ elif S1 <= 0.5: # noqa: PLR2004
+ Fv = 1.6 - 1 * (S1 - 0.4) # noqa: N806
else:
- Fv = 1.5
+ Fv = 1.5 # noqa: N806
elif site_class == 'E':
- if S1 <= 0.1:
- Fv = 3.5
- elif S1 <= 0.2:
- Fv = 3.5 - 3 * (S1 - 0.1)
- elif S1 <= 0.4:
- Fv = 3.2 - 4 * (S1 - 0.2)
+ if S1 <= 0.1: # noqa: PLR2004
+ Fv = 3.5 # noqa: N806
+ elif S1 <= 0.2: # noqa: PLR2004
+ Fv = 3.5 - 3 * (S1 - 0.1) # noqa: N806
+ elif S1 <= 0.4: # noqa: PLR2004
+ Fv = 3.2 - 4 * (S1 - 0.2) # noqa: N806
else:
- Fv = 2.4
+ Fv = 2.4 # noqa: N806
else:
- Fv = None
- print('Site class is entered with an invalid value')
+ Fv = None # noqa: N806
+ print('Site class is entered with an invalid value') # noqa: T201
return Fv
-def calculate_DBE_acceleration(Ss, S1, Fa, Fv):
+def calculate_DBE_acceleration(Ss, S1, Fa, Fv): # noqa: N802, N803
"""This function is used to calculate design spectrum acceleration parameters,
which is based ASCE 7-10 Section 11.4
Note: All notations for these variables can be found in ASCE 7-10.
@@ -106,32 +106,32 @@ def calculate_DBE_acceleration(Ss, S1, Fa, Fv):
:param Fa: a scalar computed from determine_Fa_coefficient
:param Fv: a scalar computed from determine_Fv_coefficient
:return: SMS, SM1, SDS, SD1: four scalars which are required for lateral force calculation
- """
- SMS = Fa * Ss
- SM1 = Fv * S1
- SDS = 2 / 3 * SMS
- SD1 = 2 / 3 * SM1
+ """ # noqa: D205, D400, D401, D404
+ SMS = Fa * Ss # noqa: N806
+ SM1 = Fv * S1 # noqa: N806
+ SDS = 2 / 3 * SMS # noqa: N806
+ SD1 = 2 / 3 * SM1 # noqa: N806
return SMS, SM1, SDS, SD1
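A quick numeric trace of these four scalars, using illustrative inputs (Ss = 1.5 g, S1 = 0.6 g, Fa = 1.0, Fv = 1.3; the values are hypothetical, chosen only to exercise the arithmetic):

```python
Fa, Fv, Ss, S1 = 1.0, 1.3, 1.5, 0.6

SMS = Fa * Ss      # 1.50  MCE short-period spectral acceleration
SM1 = Fv * S1      # 0.78  MCE 1-second spectral acceleration
SDS = 2 / 3 * SMS  # 1.00  design short-period acceleration
SD1 = 2 / 3 * SM1  # 0.52  design 1-second acceleration
```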
-def determine_Cu_coefficient(SD1):
+def determine_Cu_coefficient(SD1): # noqa: N802, N803
"""This function is used to determine Cu coefficient, which is based on ASCE 7-10 Table 12.8-1
Note: All notations for these variables can be found in ASCE 7-10.
:param SD1: a scalar calculated from function determine_DBE_acceleration
:return: Cu: a scalar
- """
- if SD1 <= 0.1:
- Cu = 1.7
- elif SD1 <= 0.15:
- Cu = 1.7 - 2 * (SD1 - 0.1)
- elif SD1 <= 0.2:
- Cu = 1.6 - 2 * (SD1 - 0.15)
- elif SD1 <= 0.3:
- Cu = 1.5 - 1 * (SD1 - 0.2)
- elif SD1 <= 0.4:
- Cu = 1.4
+ """ # noqa: D205, D400, D401, D404
+ if SD1 <= 0.1: # noqa: PLR2004
+ Cu = 1.7 # noqa: N806
+ elif SD1 <= 0.15: # noqa: PLR2004
+ Cu = 1.7 - 2 * (SD1 - 0.1) # noqa: N806
+ elif SD1 <= 0.2: # noqa: PLR2004
+ Cu = 1.6 - 2 * (SD1 - 0.15) # noqa: N806
+ elif SD1 <= 0.3: # noqa: PLR2004
+ Cu = 1.5 - 1 * (SD1 - 0.2) # noqa: N806
+ elif SD1 <= 0.4: # noqa: PLR2004
+ Cu = 1.4 # noqa: N806
else:
- Cu = 1.4
+ Cu = 1.4 # noqa: N806
return Cu
@@ -149,12 +149,12 @@ def determine_floor_height(
:param typical_story_height: a scalar which describes the typical story height for other stories
except 1st story
:return: an array which includes the height for each floor level (ground to roof)
- """
+ """ # noqa: D205, D400, D401, D404
floor_height = np.zeros([number_of_story + 1, 1])
for level in range(1, number_of_story + 2):
if level == 1:
floor_height[level - 1] = 0
- elif level == 2:
+ elif level == 2: # noqa: PLR2004
floor_height[level - 1] = 0 + first_story_height
else:
floor_height[level - 1] = first_story_height + typical_story_height * (
@@ -164,7 +164,7 @@ def determine_floor_height(
return floor_height
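The loop accumulates story heights into floor elevations, with the ground floor pinned at zero. Equivalently, and perhaps easier to verify, for a hypothetical 3-story frame with a 13 ft first story and 10 ft typical stories:

```python
import numpy as np

story_heights = [13.0, 10.0, 10.0]  # first story, then typical stories
floor_height = np.concatenate([[0.0], np.cumsum(story_heights)])
print(floor_height)  # [ 0. 13. 23. 33.]  ground to roof
```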
-def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie):
+def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie): # noqa: N802, N803
"""This function is used to calculate the seismic response coefficient based on ASCE 7-10 Section 12.8.1
Unit: kips, g (gravity constant), second
Note: All notations for these variables can be found in ASCE 7-10.
@@ -177,38 +177,38 @@ def calculate_Cs_coefficient(SDS, SD1, S1, T, TL, R, Ie):
:param R: a scalar given in building information
:param Ie: a scalar given in building information
:return: Cs: seismic response coefficient; determined using Equations 12.8-2 to 12.8-6
- """
+ """ # noqa: D205, D400, D401, D404
# Equation 12.8-2
- Cs_initial = SDS / (R / Ie)
+ Cs_initial = SDS / (R / Ie) # noqa: N806
# Equation 12.8-3 or 12.8-4, Cs coefficient should not exceed the following value
if T <= TL:
- Cs_upper = SD1 / (T * (R / Ie))
+ Cs_upper = SD1 / (T * (R / Ie)) # noqa: N806
else:
- Cs_upper = SD1 * TL / (T**2 * (R / Ie))
+ Cs_upper = SD1 * TL / (T**2 * (R / Ie)) # noqa: N806
# Equation 12.8-2 results shall be smaller than upper bound of Cs
if Cs_initial <= Cs_upper:
- Cs = Cs_initial
+ Cs = Cs_initial # noqa: N806
else:
- Cs = Cs_upper
+ Cs = Cs_upper # noqa: N806
# Equation 12.8-5, Cs shall not be less than the following value
- Cs_lower_1 = np.max([0.044 * SDS * Ie, 0.01])
+ Cs_lower_1 = np.max([0.044 * SDS * Ie, 0.01]) # noqa: N806
# Compare the Cs value with lower bound
if Cs >= Cs_lower_1:
pass
else:
- Cs = Cs_lower_1
+ Cs = Cs_lower_1 # noqa: N806
# Equation 12.8-6. if S1 is equal to or greater than 0.6g, Cs shall not be less than the following value
- if S1 >= 0.6:
- Cs_lower_2 = 0.5 * S1 / (R / Ie)
+ if S1 >= 0.6: # noqa: PLR2004
+ Cs_lower_2 = 0.5 * S1 / (R / Ie) # noqa: N806
if Cs >= Cs_lower_2:
pass
else:
- Cs = Cs_lower_2
+ Cs = Cs_lower_2 # noqa: N806
else:
pass
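The cascade above implements Equations 12.8-2 through 12.8-6: start from SDS/(R/Ie), cap it by the period-dependent upper bound, then floor it by max(0.044 SDS Ie, 0.01) and, when S1 >= 0.6 g, also by 0.5 S1/(R/Ie). The same logic condensed (a sketch; the floors are applied after the cap, as in the original, so a floor wins if the two conflict):

```python
def cs_sketch(SDS, SD1, S1, T, TL, R, Ie):
    Cs = SDS / (R / Ie)                                        # Eq. 12.8-2
    upper = SD1 / (T * (R / Ie)) if T <= TL else SD1 * TL / (T**2 * (R / Ie))
    Cs = min(Cs, upper)                                        # Eq. 12.8-3 / 12.8-4
    Cs = max(Cs, 0.044 * SDS * Ie, 0.01)                       # Eq. 12.8-5
    if S1 >= 0.6:
        Cs = max(Cs, 0.5 * S1 / (R / Ie))                      # Eq. 12.8-6
    return Cs

# Hypothetical inputs; the upper bound governs here.
print(cs_sketch(SDS=1.0, SD1=0.52, S1=0.6, T=1.2, TL=8.0, R=8.0, Ie=1.0))
# ~0.0542 = 0.52 / (1.2 * 8)
```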
@@ -219,10 +219,10 @@ def determine_k_coeficient(period):
"""This function is used to determine the coefficient k based on ASCE 7-10 Section 12.8.3
:param period: building period;
:return: k: a scalar will be used in Equation 12.8-12 in ASCE 7-10
- """
- if period <= 0.5:
+ """ # noqa: D205, D400, D401, D404
+ if period <= 0.5: # noqa: PLR2004
k = 1
- elif period >= 2.5:
+ elif period >= 2.5: # noqa: PLR2004
k = 2
else:
k = 1 + 0.5 * (period - 0.5)
@@ -238,13 +238,13 @@ def calculate_seismic_force(base_shear, floor_weight, floor_height, k):
:param floor_height: a vector with a length of (number_of_story+1)
:param k: a scalar given by "determine_k_coefficient"
:return: Fx: a vector describes the lateral force for each floor level
- """
+ """ # noqa: D205, D400, D401, D404
# Calculate the product of floor weight and floor height
# Note that floor height includes ground floor, which will not be used in the actual calculation.
# Ground floor is stored here for completeness.
weight_floor_height = floor_weight * floor_height[1:, 0] ** k
# Equation 12.8-12 in ASCE 7-10
- Cvx = weight_floor_height / np.sum(weight_floor_height)
+ Cvx = weight_floor_height / np.sum(weight_floor_height) # noqa: N806
# Calculate the seismic story force
seismic_force = Cvx * base_shear
# Calculate the shear force for each story: from top story to bottom story
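Equation 12.8-12 distributes the base shear over the floors in proportion to w h^k, and the comment above says the story shears then run from the top story down; with illustrative weights and heights (hypothetical numbers), the tail of this function would behave like:

```python
import numpy as np

k, base_shear = 1.2, 100.0                      # hypothetical
floor_weight = np.array([500.0, 500.0, 400.0])  # kips, floor 2 to roof
floor_height = np.array([13.0, 23.0, 33.0])     # ft above ground

weight_floor_height = floor_weight * floor_height**k
Cvx = weight_floor_height / np.sum(weight_floor_height)  # Eq. 12.8-12
seismic_force = Cvx * base_shear                          # per-floor force
story_shear = np.cumsum(seismic_force[::-1])[::-1]        # top story down
print(seismic_force.round(1), story_shear.round(1))
```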
@@ -260,14 +260,14 @@ def find_section_candidate(target_depth, section_database):
:param target_depth: a string which defines the depth of columns or beams, e.g. W14
:param section_database: a dataframe read from SMF_Section_Property.csv in Library folder
:return: a pandas Series of strings which denotes all possible sizes based on depth
- """
+ """ # noqa: D205, D400, D401, D404
candidate_index = []
for indx in section_database['index']:
match = re.search(target_depth, section_database.loc[indx, 'section size'])
if match:
candidate_index.append(indx)
candidates = section_database.loc[candidate_index, 'section size']
- return candidates
+ return candidates # noqa: RET504
def search_member_size(target_name, target_quantity, candidate, section_database):
@@ -278,7 +278,7 @@ def search_member_size(target_name, target_quantity, candidate, section_database
:param candidate: a list of strings which defines potential section sizes for beams or columns
:param section_database: a dataframe read from "Library" SMF_Section_Property.csv
:return: a string which states the member sizes (e.g., W14X730)
- """
+ """ # noqa: D205, D400, D401, D404
# Find the index for the candidate
candidate_index = list(
section_database.loc[
@@ -308,7 +308,7 @@ def search_section_property(target_size, section_database):
:param target_size: a string which defines section size, e.g. 'W14X500'
:param section_database: a dataframe read from SMF_Section_Property.csv in "Library" folder
:return: section_info: a dictionary which includes section size, index, and associated properties.
- """
+ """ # noqa: D205, D401, D404
# Loop over the sections in the SMF section database and find the one which matches the target size
# Then the property of the target section is returned as a dictionary.
# If target size cannot match any existing sizes in database, a warning message should be given.
@@ -317,7 +317,7 @@ def search_section_property(target_size, section_database):
if target_size == section_database.loc[indx, 'section size']:
section_info = section_database.loc[indx, :]
return section_info.to_dict()
- except:
+ except: # noqa: E722
sys.stderr.write(
'Error: wrong size nominated!\nNo such size exists in section database!'
)
@@ -329,7 +329,7 @@ def decrease_member_size(candidate, current_size):
:param candidate: a list of strings which defines the possible sizes
:param current_size: a string which defines current member size
:return: optimized_size: a string which defines the member size after decrease
- """
+ """ # noqa: D205, D400, D401, D404
# Find the index of the current section size in candidate pool and move it to the next one
candidate_pool_index = candidate.index(current_size)
if candidate_pool_index + 1 > len(candidate):
@@ -343,7 +343,7 @@ def extract_depth(size):
"""This function is used to extract the depth of a section size when a size string is given.
:param size: a string denoting a member size, e.g. 'W14X550'
:return: an integer which denotes the depth of the section, e.g. 'W14X550' ==> 14
- """
+ """ # noqa: D205, D400, D401, D404
# Use a Python regular expression to extract the characters between 'W' and 'X', which then become the depth
output = re.findall(r'.*W(.*)X.*', size)
return int(output[0])
@@ -353,14 +353,14 @@ def extract_weight(size):
"""This function is used to extract the weight of a section size when a size string is given.
:param size: a string denoting a member size, e.g. 'W14X550'
:return: an integer which denotes the weight of the section, e.g. 'W14X550' ==> 550
- """
+ """ # noqa: D205, D400, D401, D404
# Use a Python regular expression to extract the characters after the 'X' to the end of the string,
# which then become the weight
output = re.findall(r'.X(.*)', size)
return int(output[0])
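A quick check of the two greedy patterns on a concrete size string (standalone, runnable):

```python
import re

size = 'W14X550'
depth = re.findall(r'.*W(.*)X.*', size)  # ['14']  text between 'W' and 'X'
weight = re.findall(r'.X(.*)', size)     # ['550'] text after the 'X'
print(int(depth[0]), int(weight[0]))     # 14 550
```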
-def constructability_helper(
+def constructability_helper( # noqa: C901
section_size,
identical_size_per_story,
total_story,
@@ -374,7 +374,7 @@ def constructability_helper(
:param sorted_quantity:a string to indicate the members are sorted based on which quantity,
options: 'Ix' or 'Zx'
:return: a list whose every adjacent N stories have same strings and the whole list is in descending order
- """
+ """ # noqa: D205, D400, D401, D404, RUF002
# Determine the number of stories that have the identical member size for constructability
if identical_size_per_story > total_story:
per_story = total_story
@@ -384,7 +384,7 @@ def constructability_helper(
variation_story = []
for i in range(total_story):
if i % per_story == 0:
- variation_story.append(i)
+ variation_story.append(i) # noqa: PERF401
# Pre-process the section size list:
# Sometimes, we may have the following case for the section list (M < N < K)
# Story N has larger depth than M and K, but it has smaller Ix or Zx than M.
@@ -401,7 +401,7 @@ def constructability_helper(
temp_property = []
# Find the maximum Ix or Zx in current story chunk
for k in range(i, j):
- temp_property.append(
+ temp_property.append( # noqa: PERF401
search_section_property(section_size[k], SECTION_DATABASE)[
sorted_quantity
]
@@ -590,7 +590,7 @@ def increase_member_size(candidate, current_size):
:param candidate: a list of strings which defines the possible sizes
:param current_size: a string which denotes current member size
:return: a string which denotes the member size after one step upward
- """
+ """ # noqa: D205, D400, D401, D404
# Find the index of current section size in candidate pool and move it to previous one
candidate_pool_index = candidate.index(current_size)
if candidate_pool_index - 1 < 0: # Make sure the index does not exceed the bound
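Both steppers walk an ordered candidate pool (heaviest section first), one index per call. Note that the guard in `decrease_member_size` reads `candidate_pool_index + 1 > len(candidate)`, which can never be true since the index tops out at `len - 1`; if the elided else branch indexes `candidate_pool_index + 1`, stepping down from the last entry would raise an IndexError. A clamped sketch of the apparent intent (section names illustrative, not part of the patch):

```python
candidate = ['W14X730', 'W14X550', 'W14X426', 'W14X311']  # heaviest first

def step_down(pool, current):
    """One optimization step toward a lighter section, clamped at the end."""
    return pool[min(pool.index(current) + 1, len(pool) - 1)]

def step_up(pool, current):
    """One step back toward a heavier section, clamped at the start."""
    return pool[max(pool.index(current) - 1, 0)]

print(step_down(candidate, 'W14X550'))  # W14X426
print(step_up(candidate, 'W14X730'))    # W14X730 (already at the bound)
```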
diff --git a/modules/createSAM/AutoSDA/main_design.py b/modules/createSAM/AutoSDA/main_design.py
index 11a90c8a6..c51f3274d 100644
--- a/modules/createSAM/AutoSDA/main_design.py
+++ b/modules/createSAM/AutoSDA/main_design.py
@@ -1,4 +1,4 @@
-# This file is the main file that calls function to perform seismic design
+# This file is the main file that calls function to perform seismic design # noqa: INP001, D100
# Users need to specify the system argument in this file.
# Users also need to specify the variables in "global_variables.py"
@@ -35,13 +35,13 @@
# ********************* Single Building Case Ends Here *******************
IDs = [11]
-for id in IDs:
+for id in IDs: # noqa: A001
building_id = 'Test' + str(id)
- print('Design for Building ID = ', building_id)
+ print('Design for Building ID = ', building_id) # noqa: T201
seismic_design(building_id, baseDirectory)
# ********************* Single Building Case Ends Here *******************
end_time = time.time()
-print('Running time is: %s seconds' % round(end_time - start_time, 2))
+print('Running time is: %s seconds' % round(end_time - start_time, 2)) # noqa: T201, UP031
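Both suppressions in this driver have mechanical fixes the patch defers: renaming the loop variable clears A001 (shadowing the `id()` builtin) and an f-string clears UP031. A sketch of the modernized tail (the `seismic_design` call is left commented out, since the sketch has no `baseDirectory`):

```python
import time

start_time = time.time()
IDs = [11]
for building_num in IDs:  # renamed from `id` to avoid A001
    building_id = 'Test' + str(building_num)
    print('Design for Building ID = ', building_id)
    # seismic_design(building_id, baseDirectory)  # unchanged call
end_time = time.time()
print(f'Running time is: {round(end_time - start_time, 2)} seconds')  # UP031 fix
```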
diff --git a/modules/createSAM/AutoSDA/main_generation.py b/modules/createSAM/AutoSDA/main_generation.py
index f5f203fd0..d09cfcb2a 100644
--- a/modules/createSAM/AutoSDA/main_generation.py
+++ b/modules/createSAM/AutoSDA/main_generation.py
@@ -1,4 +1,4 @@
-# This file is the main file that calls functions to generate the nonlinear
+# This file is the main file that calls functions to generate the nonlinear # noqa: INP001, D100
# OpenSees models
# Users need to specify the system argument in this file.
# Users also need to specify the variables in "global_variables.py"
@@ -25,9 +25,9 @@
##########################################################################
IDs = [11]
-for id in IDs:
+for id in IDs: # noqa: A001
building_id = 'Test' + str(id)
- print(building_id)
+ print(building_id) # noqa: T201
model_generation(building_id, baseDirectory)
##########################################################################
diff --git a/modules/createSAM/AutoSDA/main_program.py b/modules/createSAM/AutoSDA/main_program.py
index 7d2b68351..8417fd4a5 100644
--- a/modules/createSAM/AutoSDA/main_program.py
+++ b/modules/createSAM/AutoSDA/main_program.py
@@ -1,4 +1,4 @@
-# Modified by: Stevan Gavrilovic @ SimCenter, UC Berkeley
+# Modified by: Stevan Gavrilovic @ SimCenter, UC Berkeley # noqa: INP001, D100
# Last revision: 09/2020
##########################################################################
@@ -26,45 +26,45 @@
from seismic_design import seismic_design
-def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
+def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV): # noqa: ARG001, C901, N803, D103
start_time = time.time()
# Get the current directory
- workingDirectory = os.getcwd()
+ workingDirectory = os.getcwd() # noqa: PTH109, N806
- rootSIM = {}
+ rootSIM = {} # noqa: N806
# Try to open the BIM json
- with open(BIM_file, encoding='utf-8') as f:
- rootBIM = json.load(f)
+ with open(BIM_file, encoding='utf-8') as f: # noqa: PTH123
+ rootBIM = json.load(f) # noqa: N806
try:
- rootSIM = rootBIM['Modeling']
- except:
- raise ValueError('AutoSDA - structural information missing')
+ rootSIM = rootBIM['Modeling'] # noqa: N806
+ except: # noqa: E722
+ raise ValueError('AutoSDA - structural information missing') # noqa: B904, EM101, TRY003
# Extract the path for the directory containing the folder with the building data .csv files
# pathDataFolder = rootSIM['pathDataFolder']
- pathDataFolder = os.path.join(os.getcwd(), rootSIM['folderName'])
+ pathDataFolder = os.path.join(os.getcwd(), rootSIM['folderName']) # noqa: PTH109, PTH118, N806
# pathDataFolder = workingDirectory + "/" + rootSIM['folderName']
# Get the random variables from the input file
try:
- rootRV = rootBIM['randomVariables']
- except:
- raise ValueError('AutoSDA - randomVariables section missing')
+ rootRV = rootBIM['randomVariables'] # noqa: N806
+ except: # noqa: E722
+ raise ValueError('AutoSDA - randomVariables section missing') # noqa: B904, EM101, TRY003
# Populate the RV array with name/value pairs.
# If a random variable is used here, the RV array will contain its current value
for rv in rootRV:
# Try to get the name and value of the random variable
- rvName = rv['name']
- curVal = rv['value']
+ rvName = rv['name'] # noqa: N806
+ curVal = rv['value'] # noqa: N806
# Check whether the current value is a realization of an RV, i.e., is not an RV label
# If so, then set the current value as the mean
if 'RV' in str(curVal):
- curVal = float(rv['mean'])
+ curVal = float(rv['mean']) # noqa: N806
RV_ARRAY[rvName] = curVal
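The loop expects each entry of `rootBIM['randomVariables']` to carry name/value pairs, falling back to the mean whenever the value is still an unresolved 'RV.*' label. A hypothetical input fragment and the resulting RV_ARRAY (variable names invented for illustration):

```python
RV_ARRAY = {}
rootRV = [
    {'name': 'fy', 'value': 52.3, 'mean': 50.0},                     # realized sample
    {'name': 'dampingRatio', 'value': 'RV.damping', 'mean': 0.02},   # unresolved label
]
for rv in rootRV:
    rvName, curVal = rv['name'], rv['value']
    if 'RV' in str(curVal):  # label not yet replaced by a sample -> use the mean
        curVal = float(rv['mean'])
    RV_ARRAY[rvName] = curVal
print(RV_ARRAY)  # {'fy': 52.3, 'dampingRatio': 0.02}
```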
@@ -73,13 +73,13 @@ def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
if getRV is False:
# *********************** Design Starts Here *************************
- print('Starting seismic design')
+ print('Starting seismic design') # noqa: T201
seismic_design(baseDirectory, pathDataFolder, workingDirectory)
- print('Seismic design complete')
+ print('Seismic design complete') # noqa: T201
# ******************* Nonlinear Model Generation Starts Here ******
# Nonlinear .tcl models are generated for EigenValue, Pushover, and Dynamic Analysis
- print('Generating nonlinear model')
+ print('Generating nonlinear model') # noqa: T201
model_generation(baseDirectory, pathDataFolder, workingDirectory)
# ******************* Perform Eigen Value Analysis ****************
@@ -96,13 +96,13 @@ def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
# os.chdir(target_model)
# subprocess.Popen("OpenSees Model.tcl", shell=True).wait()
- print('The design and model construction has been accomplished.')
+ print('The design and model construction has been accomplished.') # noqa: T201
end_time = time.time()
- print('Running time is: %s seconds' % round(end_time - start_time, 2))
+ print('Running time is: %s seconds' % round(end_time - start_time, 2)) # noqa: T201, UP031
# Now create the SAM file for export
- root_SAM = {}
+ root_SAM = {} # noqa: N806
root_SAM['mainScript'] = 'Model.tcl'
root_SAM['type'] = 'OpenSeesInput'
@@ -120,7 +120,7 @@ def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
root_SAM['ndf'] = 3
# Get the number of stories
- numStories = rootSIM['numStories']
+ numStories = rootSIM['numStories'] # noqa: N806
node_map = []
# Using nodes on column #1 to calculate story drift
@@ -128,14 +128,14 @@ def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
# (1, i, 1, 1) # Node at bottom of current story
# (1, i + 1, 1, 1) # Node at top of current story
for i in range(1, numStories + 2):
- nodeTagBot = 0
+ nodeTagBot = 0 # noqa: N806
if i == 1:
# Node tag at ground floor is different from those on upper stories (1, i, 1, 0)
- nodeTagBot = 1010 + 100 * i
- elif i > 9:
- nodeTagBot = 10011 + 100 * i
+ nodeTagBot = 1010 + 100 * i # noqa: N806
+ elif i > 9: # noqa: PLR2004
+ nodeTagBot = 10011 + 100 * i # noqa: N806
else:
- nodeTagBot = 1011 + 100 * i
+ nodeTagBot = 1011 + 100 * i # noqa: N806
# Create the node and add it to the node mapping array
node_entry = {}
@@ -157,21 +157,21 @@ def main(BIM_file, EVENT_file, SAM_file, model_file, filePath, getRV):
# Go back to the current directory before saving the SAM file
os.chdir(workingDirectory)
- with open(SAM_file, 'w') as f:
+ with open(SAM_file, 'w') as f: # noqa: PTH123
json.dump(root_SAM, f, indent=2)
# Copy over the .tcl files of the building model into the working directory
if getRV is False:
- pathToMainScriptFolder = (
+ pathToMainScriptFolder = ( # noqa: N806
workingDirectory + '/BuildingNonlinearModels/DynamicAnalysis/'
)
- if os.path.isdir(pathToMainScriptFolder):
- print(pathToMainScriptFolder)
+ if os.path.isdir(pathToMainScriptFolder): # noqa: PTH112
+ print(pathToMainScriptFolder) # noqa: T201
src_files = os.listdir(pathToMainScriptFolder)
for file_name in src_files:
- full_file_name = os.path.join(pathToMainScriptFolder, file_name)
- if os.path.isfile(full_file_name):
+ full_file_name = os.path.join(pathToMainScriptFolder, file_name) # noqa: PTH118
+ if os.path.isfile(full_file_name): # noqa: PTH113
shutil.copy(full_file_name, workingDirectory)
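One non-obvious detail in `main()` above is the node-tag scheme behind `node_map`: the tags encode grid-position digits, with a distinct pattern on the ground floor and an extra digit once the story index passes 9. Tracing the bottom-node tags for a hypothetical 3-story model:

```python
numStories = 3  # hypothetical
for i in range(1, numStories + 2):
    if i == 1:
        nodeTagBot = 1010 + 100 * i   # ground floor pattern (1, i, 1, 0)
    elif i > 9:
        nodeTagBot = 10011 + 100 * i  # extra digit for stories 10 and up
    else:
        nodeTagBot = 1011 + 100 * i
    print(i, '->', nodeTagBot)
# 1 -> 1110, 2 -> 1211, 3 -> 1311, 4 -> 1411
```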
diff --git a/modules/createSAM/AutoSDA/model_generation.py b/modules/createSAM/AutoSDA/model_generation.py
index 2feb80006..656f1d84e 100644
--- a/modules/createSAM/AutoSDA/model_generation.py
+++ b/modules/createSAM/AutoSDA/model_generation.py
@@ -1,4 +1,4 @@
-# This file creates a function that is called by "main_generation.py" to perform nonlinear model generation
+# This file creates a function that is called by "main_generation.py" to perform nonlinear model generation # noqa: INP001, D100
# Modified by: Stevan Gavrilovic @ SimCenter, UC Berkeley
# Last revision: 09/2020
@@ -9,7 +9,7 @@
from nonlinear_analysis import NonlinearAnalysis
-def model_generation(base_directory, pathDataFolder, workingDirectory):
+def model_generation(base_directory, pathDataFolder, workingDirectory): # noqa: N803, D103
##########################################################################
# Load Building Design Result #
##########################################################################
@@ -17,14 +17,14 @@ def model_generation(base_directory, pathDataFolder, workingDirectory):
# Change the directory to the folder where the design results are stored
os.chdir(workingDirectory + '/BuildingDesignResults/')
# Load all design results (stored as .pkl files)
- with open('construction_building.pkl', 'rb') as file:
- building = pickle.load(file)
- with open('construction_column_set.pkl', 'rb') as file:
- column_set = pickle.load(file)
- with open('construction_beam_set.pkl', 'rb') as file:
- beam_set = pickle.load(file)
- with open('construction_connection_set.pkl', 'rb') as file:
- connection_set = pickle.load(file)
+ with open('construction_building.pkl', 'rb') as file: # noqa: PTH123
+ building = pickle.load(file) # noqa: S301
+ with open('construction_column_set.pkl', 'rb') as file: # noqa: PTH123
+ column_set = pickle.load(file) # noqa: S301
+ with open('construction_beam_set.pkl', 'rb') as file: # noqa: PTH123
+ beam_set = pickle.load(file) # noqa: S301
+ with open('construction_connection_set.pkl', 'rb') as file: # noqa: PTH123
+ connection_set = pickle.load(file) # noqa: S301
##########################################################################
# Update the Building Directory #
@@ -58,6 +58,6 @@ def model_generation(base_directory, pathDataFolder, workingDirectory):
analysis_list = ['EigenValueAnalysis', 'PushoverAnalysis', 'DynamicAnalysis']
for analysis_type in analysis_list:
- model = NonlinearAnalysis(
+ model = NonlinearAnalysis( # noqa: F841
building, column_set, beam_set, connection_set, analysis_type
)
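Two suppressions here deserve a note. S301 flags `pickle.load` because unpickling can execute arbitrary code from the file; it is tolerable only because these .pkl files are written by the design step of the same workflow. F841 flags `model`, which is assigned purely for the side effect of `NonlinearAnalysis` writing .tcl files. A defensive loading sketch (the helper name is invented, not part of the patch):

```python
import pickle
from pathlib import Path

def load_design_result(path):
    """Unpickle a design artifact, refusing anything outside the run folder.

    pickle.load is only safe on trusted input (Ruff S301); here we at least
    confirm the file lives in the expected results directory before loading.
    """
    p = Path(path).resolve()
    if p.parent.name != 'BuildingDesignResults':
        raise ValueError(f'unexpected pickle location: {p}')
    with p.open('rb') as file:
        return pickle.load(file)  # trusted: written by seismic_design
```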
diff --git a/modules/createSAM/AutoSDA/nonlinear_analysis.py b/modules/createSAM/AutoSDA/nonlinear_analysis.py
index 5aed397ea..77ae21e67 100644
--- a/modules/createSAM/AutoSDA/nonlinear_analysis.py
+++ b/modules/createSAM/AutoSDA/nonlinear_analysis.py
@@ -1,4 +1,4 @@
-# This file is used to include all user defined classes and functions
+# This file is used to include all user defined classes and functions # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in Dec 2018
# Modified by: Stevan Gavrilovic @ SimCenter, UC Berkeley
@@ -35,7 +35,7 @@ class NonlinearAnalysis:
(14) copy baseline files and revise if necessary
(15) define various recorders for output
(16) define pushover loading pattern
- """
+ """ # noqa: D205, D400, D404
def __init__(
self,
@@ -59,10 +59,10 @@ def __init__(
y: from 0 to (bay number+1)
:param analysis_type: a string specifies which analysis type the current model is for
options: 'EigenValueAnalysis', 'PushoverAnalysis', 'DynamicAnalysis'
- """
+ """ # noqa: D205, D400, D401, D404
# User-hints: if wrong analysis_type is input
if (
- analysis_type != 'EigenValueAnalysis'
+ analysis_type != 'EigenValueAnalysis' # noqa: PLR1714
and analysis_type != 'PushoverAnalysis'
and analysis_type != 'DynamicAnalysis'
):
@@ -73,15 +73,15 @@ def __init__(
sys.exit(99)
# Change the working directory to the target building folder
- if not os.path.exists(building.directory['building nonlinear model']):
- os.makedirs(building.directory['building nonlinear model'])
+ if not os.path.exists(building.directory['building nonlinear model']): # noqa: PTH110
+ os.makedirs(building.directory['building nonlinear model']) # noqa: PTH103
os.chdir(building.directory['building nonlinear model'])
# Change the working directory to the desired folder (EigenValueAnalysis, PushoverAnalysis, or DynamicAnalysis)
target_folder = (
building.directory['building nonlinear model'] + '/' + analysis_type
)
- if not os.path.exists(target_folder):
- os.makedirs(target_folder)
+ if not os.path.exists(target_folder): # noqa: PTH110
+ os.makedirs(target_folder) # noqa: PTH103
os.chdir(target_folder)
# Call methods to write .tcl files for the building
@@ -115,14 +115,14 @@ def __init__(
self.write_damping(building)
self.write_dynamic_analysis_parameters(building)
- def write_nodes(self, building, column_set, beam_set):
+ def write_nodes(self, building, column_set, beam_set): # noqa: C901
"""Create a .tcl file to write node tags and coordinates for nonlinear analysis model
:param building: a class defined in "building_information.py"
:param column_set: a list[x][y] and each element is a class defined in "column_component.py"
:param beam_set: a list[x][z] and each element is a class defined in "beam_component.py"
:return: a .tcl file
- """
- with open('DefineNodes2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineNodes2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define all nodes \n'
) # Introduce the file usage
@@ -130,15 +130,15 @@ def write_nodes(self, building, column_set, beam_set):
tclfile.write('# Set bay width and story height\n')
tclfile.write(
- 'set\tBayWidth\t[expr %.2f*12]; \n'
+ 'set\tBayWidth\t[expr %.2f*12]; \n' # noqa: UP031
% (building.geometry['X bay width'])
)
tclfile.write(
- 'set\tFirstStory\t[expr %.2f*12]; \n'
+ 'set\tFirstStory\t[expr %.2f*12]; \n' # noqa: UP031
% (building.geometry['first story height'])
)
tclfile.write(
- 'set\tTypicalStory\t[expr %.2f*12]; \n\n'
+ 'set\tTypicalStory\t[expr %.2f*12]; \n\n' # noqa: UP031
% (building.geometry['typical story height'])
)
@@ -196,7 +196,7 @@ def write_nodes(self, building, column_set, beam_set):
'NodesAroundPanelZone\t%i\t%i\t[expr %i*$BayWidth]'
% (j, i, j - 1)
)
- if i <= 2:
+ if i <= 2: # noqa: PLR2004
tclfile.write(
'\t[expr %i*$FirstStory+0*$TypicalStory]' % (i - 1)
)
@@ -219,7 +219,7 @@ def write_nodes(self, building, column_set, beam_set):
'\t[expr %i*$BayWidth]'
% (building.geometry['number of X bay'] + 1)
) # X coordinate
- if i <= 2:
+ if i <= 2: # noqa: PLR2004
tclfile.write(
'\t[expr %i*$FirstStory+0*$TypicalStory];' % (i - 1)
) # Y coordinate
@@ -276,8 +276,8 @@ def write_fixities(self, building):
"""Create a .tcl file to write boundary for the model
:param building: a class defined in "building_information.py"
:return: a .tcl file
- """
- with open('DefineFixities2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineFixities2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define the fixity at all column bases \n\n\n'
)
@@ -295,9 +295,9 @@ def write_floor_constraint(self, building):
"""Create a .tcl file to write floor constraint, i.e., equal DOF
:param building: a class defined in "building_information.py"
:return: a .tcl file
- """
+ """ # noqa: D205, D400
# Create a .tcl file to write floor constraint, i.e., equal DOF
- with open('DefineFloorConstraint2DModel.tcl', 'w') as tclfile:
+ with open('DefineFloorConstraint2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define floor constraint \n')
tclfile.write(
'# Nodes at same floor level have identical lateral displacement\n'
@@ -331,9 +331,9 @@ def write_beam_hinge_material(self, building, beam_set):
:param building: a class defined in "building_information.py"
:param beam_set: a list[x][z] and each element is a class defined in "beam_component.py"
:return: a .tcl file
- """
+ """ # noqa: D205, D400
material_tag = 70001
- with open('DefineBeamHingeMaterials2DModel.tcl', 'w') as tclfile:
+ with open('DefineBeamHingeMaterials2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define beam hinge material models\n\n\n'
)
@@ -405,9 +405,9 @@ def write_column_hinge_material(self, building, column_set):
:param building: a class defined in "building_information.py"
:param column_set: a list[x][y] and each element is a class defined in "column_component.py" file
:return: a .tcl file
- """
+ """ # noqa: D205, D400
material_tag = 60001
- with open('DefineColumnHingeMaterials2DModel.tcl', 'w') as tclfile:
+ with open('DefineColumnHingeMaterials2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define column hinge material models\n\n\n'
)
@@ -492,8 +492,8 @@ def write_beam(self, building):
"""Create a .tcl file to define the beam element
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefineBeams2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineBeams2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define beam elements \n\n\n')
tclfile.write('# Define beam section sizes \n')
for i in range(
@@ -559,8 +559,8 @@ def write_column(self, building):
"""Create a .tcl file to define column element
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefineColumns2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineColumns2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define columns \n\n\n')
# Define exterior column sizes
@@ -669,8 +669,8 @@ def write_beam_hinge(self, building):
"""Create a .tcl file to define beam hinge element (rotational spring)
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefineBeamHinges2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineBeamHinges2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define beam hinges \n\n\n')
tclfile.write(
@@ -717,8 +717,8 @@ def write_column_hinge(self, building):
"""Create a .tcl file to define column hinge element (rotational spring)
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefineColumnHinges2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineColumnHinges2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define column hinges\n\n\n')
for i in range(
1, building.geometry['number of story'] + 1
@@ -822,8 +822,8 @@ def write_mass(self, building):
"""Create a .tcl file which defines the mass of each floor at each node
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefineMasses2DModel.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefineMasses2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define all nodal masses \n\n')
# Write values for floor weights, tributary mass ratio, and nodal mass
@@ -876,8 +876,8 @@ def write_panel_zone_elements(self, building):
"""Create a .tcl file that defines the elements in panel zone
:param building: a class defined in "building_information.py" file
:return: a .tcl file
- """
- with open('DefinePanelZoneElements.tcl', 'w') as tclfile:
+ """ # noqa: D205, D400
+ with open('DefinePanelZoneElements.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define elements in panel zones \n\n'
)
@@ -899,7 +899,7 @@ def write_panel_zone_elements(self, building):
tclfile.write('\n')
tclfile.write('puts "Panel zone elements defined"')
- def write_panel_zone_springs(
+ def write_panel_zone_springs( # noqa: D102
self,
building,
column_set,
@@ -907,7 +907,7 @@ def write_panel_zone_springs(
connection_set,
):
# Create a .tcl file that defines the springs involved in panel zones
- with open('DefinePanelZoneSprings.tcl', 'w') as tclfile:
+ with open('DefinePanelZoneSprings.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define springs in panel zone \n\n'
)
@@ -935,13 +935,13 @@ def write_panel_zone_springs(
'\t$Es\t$Fy'
) # Young's modulus and Yielding stress
tclfile.write(
- '\t%.2f' % column_set[i - 2][j - 1].section['d']
+ '\t%.2f' % column_set[i - 2][j - 1].section['d'] # noqa: UP031
) # column depth
tclfile.write(
- '\t%.2f' % column_set[i - 2][j - 1].section['bf']
+ '\t%.2f' % column_set[i - 2][j - 1].section['bf'] # noqa: UP031
) # column flange width
tclfile.write(
- '\t%.2f' % column_set[i - 2][j - 1].section['tf']
+ '\t%.2f' % column_set[i - 2][j - 1].section['tf'] # noqa: UP031
) # column flange thickness
# Use actual panel zone thickness rather than the assumed column web thickness
tclfile.write(
@@ -955,11 +955,11 @@ def write_panel_zone_springs(
# note that j is the column number.
# the number of beam at each floor level is one less than that of columns
tclfile.write(
- '\t%.2f' % beam_set[i - 2][j - 1].section['d']
+ '\t%.2f' % beam_set[i - 2][j - 1].section['d'] # noqa: UP031
) # beam depth
else:
tclfile.write(
- '\t%.2f' % beam_set[i - 2][-1].section['d']
+ '\t%.2f' % beam_set[i - 2][-1].section['d'] # noqa: UP031
) # beam depth
tclfile.write(
'\t1.1\t0.03; \n'
@@ -967,9 +967,9 @@ def write_panel_zone_springs(
tclfile.write('\n')
tclfile.write('puts "Panel zone springs defined"')
- def write_gravity_load(self, building):
+ def write_gravity_load(self, building): # noqa: D102
# Create a .tcl file to write gravity load: 1.00 DL + 0.25 LL
- with open('DefineGravityLoads2DModel.tcl', 'w') as tclfile:
+ with open('DefineGravityLoads2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define expected gravity loads\n\n\n')
# Assign the beam dead load values
@@ -1048,9 +1048,9 @@ def write_gravity_load(self, building):
tclfile.write('puts "Expected gravity loads defined"')
- def write_pushover_loading(self, building):
+ def write_pushover_loading(self, building): # noqa: D102
# Create a .tcl file to write lateral pushover loading
- with open('DefinePushoverLoading2DModel.tcl', 'w') as tclfile:
+ with open('DefinePushoverLoading2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define pushover loading\n\n\n')
tclfile.write('pattern\tPlain\t200\tLinear\t{\n\n')
tclfile.write('# Pushover pattern\n')
@@ -1072,9 +1072,9 @@ def write_pushover_loading(self, building):
tclfile.write('\n')
tclfile.write('}')
- def write_base_reaction_recorder(self, building):
+ def write_base_reaction_recorder(self, building): # noqa: D102
# Create a .tcl file to write the recorders for base reactions
- with open('DefineBaseReactionRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineBaseReactionRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define base node reaction recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/BaseReactions\n\n')
@@ -1096,9 +1096,9 @@ def write_base_reaction_recorder(self, building):
tclfile.write('\t%i%i' % (building.geometry['number of X bay'] + 2, 1))
tclfile.write('\t-dof\t1\treaction;\n\n')
- def write_beam_hinge_recorder(self, building):
+ def write_beam_hinge_recorder(self, building): # noqa: D102
# Create a .tcl file to record beam hinge forces and deformation
- with open('DefineBeamHingeRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineBeamHingeRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define beam hinge force-deformation recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/BeamHingeMoment\n\n')
@@ -1129,9 +1129,9 @@ def write_beam_hinge_recorder(self, building):
tclfile.write('\tdeformation;\n')
tclfile.write('\n')
- def write_column_hinge_recorder(self, building):
+ def write_column_hinge_recorder(self, building): # noqa: D102
# Create a .tcl file to record column hinge forces and deformations
- with open('DefineColumnHingeRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineColumnHingeRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define column hinge force-deformation recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/ColumnHingeMoment\n\n')
@@ -1162,9 +1162,9 @@ def write_column_hinge_recorder(self, building):
tclfile.write('\tdeformation;')
tclfile.write('\n')
- def write_beam_force_recorder(self, building):
+ def write_beam_force_recorder(self, building): # noqa: D102
# Create a .tcl file to write beam element forces recorder for output
- with open('DefineGlobalBeamForceRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineGlobalBeamForceRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define global beam force recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/GlobalBeamForces\n\n')
tclfile.write('# X-Direction beam element global force recorders\n')
@@ -1177,9 +1177,9 @@ def write_beam_force_recorder(self, building):
tclfile.write('\t%i%i%i%i%i%i%i' % (2, j, i, 1, j + 1, i, 1))
tclfile.write('\tforce\n')
- def write_column_force_recorder(self, building):
+ def write_column_force_recorder(self, building): # noqa: D102
# Create a .tcl file to write column element forces recorder for output
- with open('DefineGlobalColumnForceRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineGlobalColumnForceRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define global column force recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/GlobalBeamForces\n\n')
tclfile.write('# Column element global force recorders\n')
@@ -1195,9 +1195,9 @@ def write_column_force_recorder(self, building):
tclfile.write('\tforce;\n')
tclfile.write('\n')
- def write_node_displacement_recorder(self, building):
+ def write_node_displacement_recorder(self, building): # noqa: D102
# Create a .tcl file to write the node displacements recorder for output
- with open('DefineNodeDisplacementRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineNodeDisplacementRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define node displacement recorders\n\n\n')
tclfile.write('cd\t$baseDir/$dataDir/NodeDisplacements\n\n')
for i in range(1, building.geometry['number of story'] + 2):
@@ -1211,9 +1211,9 @@ def write_node_displacement_recorder(self, building):
tclfile.write('\t%i%i%i%i' % (j, i, 1, 1))
tclfile.write('\t-dof\t1\tdisp;\n')
- def write_story_drift_recorder(self, building, analysis_type):
+ def write_story_drift_recorder(self, building, analysis_type): # noqa: D102
# Create a .tcl file to write story drift recorder for output
- with open('DefineStoryDriftRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineStoryDriftRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define story drift recorders\n\n\n')
if analysis_type == 'PushoverAnalysis':
@@ -1262,9 +1262,9 @@ def write_story_drift_recorder(self, building, analysis_type):
)
tclfile.write('\t-dof\t1\t-perpDirn\t2; \n')
- def write_node_acceleration_recorder(self, building):
+ def write_node_acceleration_recorder(self, building): # noqa: D102
# Create a .tcl file to record absolute node acceleration
- with open('DefineNodeAccelerationRecorders2DModel.tcl', 'w') as tclfile:
+ with open('DefineNodeAccelerationRecorders2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# Define node acceleration recorders\n\n\n')
tclfile.write(
'cd $baseDir/$dataDir/EQ_$eqNumber/Scale_$scale/NodeAccelerations\n\n'
@@ -1281,9 +1281,9 @@ def write_node_acceleration_recorder(self, building):
tclfile.write('\t%i%i%i%i' % (j, i, 1, 1))
tclfile.write('\t-dof\t1\taccel;\n')
- def write_damping(self, building):
+ def write_damping(self, building): # noqa: D102
# Create a .tcl file to define damping for dynamic analysis
- with open('DefineDamping2DModel.tcl', 'w') as tclfile:
+ with open('DefineDamping2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write('# This file will be used to define damping\n\n')
tclfile.write('# A damping ratio of 2% is used for steel buildings\n')
@@ -1346,9 +1346,9 @@ def write_damping(self, building):
tclfile.write('\t-rayleigh\t$alpha0\t0.0\t0.0\t0.0;\n\n')
tclfile.write('puts "Rayleigh damping defined"')
- def write_dynamic_analysis_parameters(self, building):
+ def write_dynamic_analysis_parameters(self, building): # noqa: D102
# Create a .tcl file to define all parameters pertinent to dynamic analysis solver
- with open('DefineDynamicAnalysisParameters2DModel.tcl', 'w') as tclfile:
+ with open('DefineDynamicAnalysisParameters2DModel.tcl', 'w') as tclfile: # noqa: PTH123
tclfile.write(
'# This file will be used to define analysis parameters relevant to dynamic solver\n\n\n'
)
@@ -1380,7 +1380,7 @@ def copy_baseline_eigen_files(self, building, analysis_type):
:param analysis_type: a string specifies the analysis type that the current nonlinear model is for
options: 'EigenValueAnalysis', 'PushoverAnalysis', 'DynamicAnalysis'
:return:
- """
+ """ # noqa: D205, D400, D401
# Change the working directory to the folder where baseline .tcl files are stored
source_dir = (
building.directory['baseline files nonlinear'] + '/' + analysis_type
@@ -1409,33 +1409,33 @@ def copy_baseline_eigen_files(self, building, analysis_type):
# Revise the baseline file EigenValueAnalysis.tcl if the building has fewer than four stories.
# The default EigenValueAnalysis.tcl file analyzes four modes.
# Buildings with three stories or fewer might only have a 1st, 2nd, and 3rd mode.
- if building.geometry['number of story'] <= 3:
+ if building.geometry['number of story'] <= 3: # noqa: PLR2004
# This changes the desired number of modes
new_mode = 'set nEigenL 3'
# Release the equal-DOF constraints for buildings with three stories or fewer
- with open('Model.tcl') as file:
+ with open('Model.tcl') as file: # noqa: PTH123
content = file.read()
new_content = content.replace(
'source DefineFloorConstraint2DModel.tcl',
'# source DefineFloorConstraint2DModel.tcl',
)
- with open('Model.tcl', 'w') as file:
+ with open('Model.tcl', 'w') as file: # noqa: PTH123
file.write(new_content)
# This changes the node tags used to record the eigenvectors
old_string = '**EIGENVECTOR_NODE**'
new_string = '1110'
for floor in range(1, building.geometry['number of story'] + 1):
new_string += ' %i%i%i%i' % (1, floor + 1, 1, 1)
- with open('EigenValueAnalysis.tcl') as file:
+ with open('EigenValueAnalysis.tcl') as file: # noqa: PTH123
content = file.read()
new_content = content.replace(old_mode, new_mode)
new_content = new_content.replace(old_string, new_string)
- with open('EigenValueAnalysis.tcl', 'w') as file:
+ with open('EigenValueAnalysis.tcl', 'w') as file: # noqa: PTH123
file.write(new_content)
# Perform an eigenvalue analysis to obtain the periods needed for Rayleigh damping in the dynamic analysis
cmd = 'OpenSees Model.tcl'
- subprocess.Popen(cmd, shell=True).wait()
+ subprocess.Popen(cmd, shell=True).wait() # noqa: S602
# Update pushover parameters contained Model.tcl when performing pushover analysis
elif analysis_type == 'PushoverAnalysis':
@@ -1452,11 +1452,11 @@ def copy_baseline_eigen_files(self, building, analysis_type):
'0.01',
'%.2f' % (0.1 * building.geometry['floor height'][-1] * 12),
] # DisplacementMaximum should be in inches.
- with open('Model.tcl') as file:
+ with open('Model.tcl') as file: # noqa: PTH123
content = file.read()
for indx in range(len(old_string)):
content = content.replace(old_string[indx], new_string[indx])
- with open('Model.tcl', 'w') as file:
+ with open('Model.tcl', 'w') as file: # noqa: PTH123
file.write(content)
# Update Model.tcl and RunIDA2DModel.tcl files for dynamic analysis
@@ -1477,7 +1477,7 @@ def copy_baseline_eigen_files(self, building, analysis_type):
os.chdir(
building.directory['building nonlinear model'] + '/' + analysis_type
)
- with open('Model.tcl') as file:
+ with open('Model.tcl') as file: # noqa: PTH123
content = file.read()
content = content.replace(
old_periods[0], str(periods[0])
@@ -1486,10 +1486,10 @@ def copy_baseline_eigen_files(self, building, analysis_type):
old_periods[1], str(periods[2])
) # Third-mode period
# Write the updated content into Model.tcl
- with open('Model.tcl', 'w') as file:
+ with open('Model.tcl', 'w') as file: # noqa: PTH123
file.write(content)
# Update dynamic parameters in RunIDA2DModel.tcl
- with open('RunIDA2DModel.tcl') as file:
+ with open('RunIDA2DModel.tcl') as file: # noqa: PTH123
content = file.read()
old_string = [
'**NumberOfGroundMotions**',
@@ -1500,5 +1500,5 @@ def copy_baseline_eigen_files(self, building, analysis_type):
for indx in range(len(old_string)):
content = content.replace(old_string[indx], str(new_string[indx]))
# Write the new content back into RunIDA2DModel.tcl
- with open('RunIDA2DModel.tcl', 'w') as file:
+ with open('RunIDA2DModel.tcl', 'w') as file: # noqa: PTH123
file.write(content)
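One more security-category suppression in this file is S602, on the `subprocess.Popen(cmd, shell=True)` call that runs the eigenvalue model. Since the command is a fixed two-token string, the shell is unnecessary; a noqa-free sketch (assumes an `OpenSees` executable on PATH):

```python
import subprocess

# Equivalent of subprocess.Popen('OpenSees Model.tcl', shell=True).wait(),
# but with an argument list so no shell is involved (clears S602).
subprocess.run(['OpenSees', 'Model.tcl'], check=True)
```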
diff --git a/modules/createSAM/AutoSDA/seismic_design.py b/modules/createSAM/AutoSDA/seismic_design.py
index 688a6317e..803adf3ab 100644
--- a/modules/createSAM/AutoSDA/seismic_design.py
+++ b/modules/createSAM/AutoSDA/seismic_design.py
@@ -5,7 +5,7 @@
# Modified by: Stevan Gavrilovic @ SimCenter, UC Berkeley
# Last revision: 09/2020
-"""
+""" # noqa: INP001, D400, D404
##########################################################################
# Load Built-in Packages #
@@ -38,7 +38,7 @@
##########################################################################
-def seismic_design(base_directory, pathDataFolder, workingDirectory):
+def seismic_design(base_directory, pathDataFolder, workingDirectory): # noqa: C901, N803, D103, PLR0912, PLR0915
# **************** Debug using only **********************************
# building_id = 'Test3'
# from global_variables import base_directory
@@ -105,12 +105,12 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
* RBS_STIFFNESS_FACTOR
<= 0.025 / building_1.elf_parameters['rho']
):
- print('Member size after optimization %i' % iteration)
- print('Exterior column:', building_1.member_size['exterior column'])
- print('Interior column:', building_1.member_size['interior column'])
- print('Beam:', building_1.member_size['beam'])
- print('Current story drifts: (%)')
- print(
+ print('Member size after optimization %i' % iteration) # noqa: T201
+ print('Exterior column:', building_1.member_size['exterior column']) # noqa: T201
+ print('Interior column:', building_1.member_size['interior column']) # noqa: T201
+ print('Beam:', building_1.member_size['beam']) # noqa: T201
+ print('Current story drifts: (%)') # noqa: T201
+ print( # noqa: T201
building_1.elastic_response['story drift']
* 5.5
* RBS_STIFFNESS_FACTOR
@@ -199,7 +199,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Check the flag of each column
if not column_set[story][column_no].check_flag():
sys.stderr.write(
- 'column_%s%s is not feasible!!!\n' % (story, column_no)
+ 'column_%s%s is not feasible!!!\n' % (story, column_no) # noqa: UP031
)
not_feasible_column.append([story, column_no])
# sys.exit(1)
@@ -300,7 +300,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
)
# Check the flag of each beam
if not beam_set[story][bay].check_flag():
- sys.stderr.write('beam_%s%s is not feasible!!!\n' % (story, bay))
+ sys.stderr.write('beam_%s%s is not feasible!!!\n' % (story, bay)) # noqa: UP031
not_feasible_beam.append([story, bay])
# sys.exit(1)
@@ -449,7 +449,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
)
if not connection_set[story][connection_no].check_flag():
sys.stderr.write(
- 'connection_%s%s is not feasible!!!\n' % (story, connection_no)
+ 'connection_%s%s is not feasible!!!\n' % (story, connection_no) # noqa: UP031
)
not_feasible_connection.append([story, connection_no])
# sys.exit(1)
@@ -645,19 +645,19 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
building_1.read_modal_period()
building_1.compute_seismic_force()
# Re-perform the elastic analysis to obtain the updated demand
- model_1 = ElasticAnalysis(
+ model_1 = ElasticAnalysis( # noqa: F841
building_1, for_drift_only=False, for_period_only=False
)
building_1.read_story_drift()
# **************************** Debug Using Only *************************************
i += 1
- print('Optimal member size after upscale column%i' % i)
- print('Exterior column:', building_1.member_size['exterior column'])
- print('Interior column:', building_1.member_size['interior column'])
- print('Beam:', building_1.member_size['beam'])
- print('After upscale column, current story drift is: ')
- print(building_1.elastic_response['story drift'] * 5.5 * 1.1 * 100)
+ print('Optimal member size after upscale column%i' % i) # noqa: T201
+ print('Exterior column:', building_1.member_size['exterior column']) # noqa: T201
+ print('Interior column:', building_1.member_size['interior column']) # noqa: T201
+ print('Beam:', building_1.member_size['beam']) # noqa: T201
+ print('After upscale column, current story drift is: ') # noqa: T201
+ print(building_1.elastic_response['story drift'] * 5.5 * 1.1 * 100) # noqa: T201
# **************************** Debug Ends Here **************************************
elastic_demand = ElasticOutput(building_1)
@@ -845,7 +845,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Check the flag of each beam (might not be necessary)
if not construction_beam_set[story][bay].check_flag():
sys.stderr.write(
- 'Construction beam_%s%s is not feasible!!!\n' % (story, bay)
+ 'Construction beam_%s%s is not feasible!!!\n' % (story, bay) # noqa: UP031
)
# Construct new column objects after considering constructability
@@ -889,7 +889,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Check the flag of each column (May not be necessary)
if not construction_column_set[story][column_no].check_flag():
sys.stderr.write(
- 'Construction column_%s%s is not feasible!!!\n'
+ 'Construction column_%s%s is not feasible!!!\n' # noqa: UP031
% (story, column_no)
)
@@ -996,7 +996,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
connection_no
].check_flag(): # (Might not be necessary)
sys.stderr.write(
- 'Construction connection_%s%s is not feasible!!!\n'
+ 'Construction connection_%s%s is not feasible!!!\n' # noqa: UP031
% (story, connection_no)
)
not_feasible_construction_connection.append([story, connection_no])
@@ -1239,19 +1239,19 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
building_2.read_modal_period()
building_2.compute_seismic_force()
# Re-perform the elastic analysis to obtain the updated demand
- model_2 = ElasticAnalysis(
+ model_2 = ElasticAnalysis( # noqa: F841
building_2, for_drift_only=False, for_period_only=False
)
building_2.read_story_drift()
# **************************** Debug Using Only *************************************
i += 1
- print('Construction#1 member size after upscale column%i' % iteration)
- print('Exterior column:', building_1.member_size['exterior column'])
- print('Interior column:', building_1.member_size['interior column'])
- print('Beam:', building_1.member_size['beam'])
- print('After upscale column, current story drift is: ')
- print(building_1.elastic_response['story drift'] * 5.5 * 1.1 * 100)
+ print('Construction#1 member size after upscale column%i' % iteration) # noqa: T201
+ print('Exterior column:', building_1.member_size['exterior column']) # noqa: T201
+ print('Interior column:', building_1.member_size['interior column']) # noqa: T201
+ print('Beam:', building_1.member_size['beam']) # noqa: T201
+ print('After upscale column, current story drift is: ') # noqa: T201
+ print(building_1.elastic_response['story drift'] * 5.5 * 1.1 * 100) # noqa: T201
# **************************** Debug Ends Here **************************************
elastic_demand_2 = ElasticOutput(building_2)
@@ -1452,7 +1452,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
building_3.compute_seismic_force()
# Perform elastic analysis for construction sizes
- model_3 = ElasticAnalysis(
+ model_3 = ElasticAnalysis( # noqa: F841
building_3, for_drift_only=False, for_period_only=False
)
building_3.read_story_drift()
@@ -1496,7 +1496,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Check the flag of each column (May not be necessary)
if not construction_column_set[story][column_no].check_flag():
sys.stderr.write(
- 'Construction column_%s%s is not feasible!!!\n'
+ 'Construction column_%s%s is not feasible!!!\n' # noqa: UP031
% (story, column_no)
)
@@ -1537,7 +1537,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Check the flag of each column (May not be necessary)
if not construction_column_set[story][column_no].check_flag():
sys.stderr.write(
- 'Construction column_%s%s is not feasible!!!\n'
+ 'Construction column_%s%s is not feasible!!!\n' # noqa: UP031
% (story, column_no)
)
@@ -1550,7 +1550,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
construction_column_set[story][col_no].section['bf']
< construction_beam_set[story][0].section['bf']
):
- print('Column width in Story %i is less than beam' % (story))
+ print('Column width in Story %i is less than beam' % (story)) # noqa: T201
# ********************************************************************
# ///////////////// Store Design Results /////////////////////////////
@@ -1568,33 +1568,33 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Nonlinear model generation may require information on the building, the beam/column hinges, and the panel zone thickness.
# Store the building class to "building.pkl"
- with open('optimal_building.pkl', 'wb') as output_file:
+ with open('optimal_building.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(building_1, output_file)
- with open('construction_building.pkl', 'wb') as output_file:
+ with open('construction_building.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(building_3, output_file)
# Store the beam set to "beam_set.pkl"
- with open('optimal_beam_set.pkl', 'wb') as output_file:
+ with open('optimal_beam_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(beam_set, output_file)
# Store the column set to "column_set.pkl"
- with open('optimal_column_set.pkl', 'wb') as output_file:
+ with open('optimal_column_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(column_set, output_file)
# Store the connection set to "connection_set.pkl"
- with open('optimal_connection_set.pkl', 'wb') as output_file:
+ with open('optimal_connection_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(connection_set, output_file)
# Store the construction beam set
- with open('construction_beam_set.pkl', 'wb') as output_file:
+ with open('construction_beam_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(construction_beam_set, output_file)
# Store the construction column set
- with open('construction_column_set.pkl', 'wb') as output_file:
+ with open('construction_column_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(construction_column_set, output_file)
- with open('construction_connection_set.pkl', 'wb') as output_file:
+ with open('construction_connection_set.pkl', 'wb') as output_file: # noqa: PTH123
pickle.dump(construction_connection_set, output_file)
# Store the member sizes and story drift into csv files.
@@ -1639,7 +1639,7 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Store the doubler plate thickness
header = []
for bay in range(building_1.geometry['number of X bay'] + 1):
- header.append('connection %s' % bay)
+ header.append('connection %s' % bay) # noqa: PERF401, UP031
# Initialize the dataframe to store doubler plate thickness
optimal_doubler_plate = pd.DataFrame(columns=header)
construction_doubler_plate = pd.DataFrame(columns=header)
@@ -1707,14 +1707,14 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Define the headers for the columns DC ratio
header = []
for bay in range(building_1.geometry['number of X bay'] + 1):
- header.extend(['column %s' % bay])
+ header.extend(['column %s' % bay]) # noqa: UP031
force_list = ['axial', 'shear', 'flexural']
for force in force_list:
- column_DC = [
+ column_DC = [ # noqa: N806
[0] * (building_1.geometry['number of X bay'] + 1)
for story in range(building_1.geometry['number of story'])
]
- construction_column_DC = [
+ construction_column_DC = [ # noqa: N806
[0] * (building_1.geometry['number of X bay'] + 1)
for story in range(building_1.geometry['number of story'])
]
@@ -1743,14 +1743,14 @@ def seismic_design(base_directory, pathDataFolder, workingDirectory):
# Define the headers for the beams DC ratio
header = []
for bay in range(building_1.geometry['number of X bay']):
- header.extend(['beam %s' % bay])
+ header.extend(['beam %s' % bay]) # noqa: UP031
force_list = ['shear', 'flexural']
for force in force_list:
- beam_DC = [
+ beam_DC = [ # noqa: N806
[0] * (building_1.geometry['number of X bay'])
for story in range(building_1.geometry['number of story'])
]
- construction_beam_DC = [
+ construction_beam_DC = [ # noqa: N806
[0] * (building_1.geometry['number of X bay'])
for story in range(building_1.geometry['number of story'])
]
diff --git a/modules/createSAM/AutoSDA/steel_material.py b/modules/createSAM/AutoSDA/steel_material.py
index 5dbf5d7bc..a1f95ef12 100644
--- a/modules/createSAM/AutoSDA/steel_material.py
+++ b/modules/createSAM/AutoSDA/steel_material.py
@@ -1,4 +1,4 @@
-# This file is used to define the SteelMaterial class
+# This file is used to define the SteelMaterial class # noqa: INP001, D100
# Developed by GUAN, XINGQUAN @ UCLA in June 2018
# Updated in Sept. 2018
@@ -15,18 +15,18 @@ class SteelMaterial:
(2) Ultimate stress (Fu)
(3) Young's modulus (E)
(4) Ry value
- """
+ """ # noqa: D205, D400, D404
def __init__(
self,
yield_stress=50,
ultimate_stress=65,
elastic_modulus=29000,
- Ry_value=1.1,
+ Ry_value=1.1, # noqa: N803
):
""":param yield_stress: Fy of steel material, default value is 50 ksi
:param elastic_modulus: E of steel material, default value is 29000 ksi
- """
+ """ # noqa: D205
self.Fy = yield_stress
self.Fu = ultimate_stress
self.E = elastic_modulus
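
For reference, a usage sketch of the class above with illustrative values (the hunk is truncated, so it is assumed the constructor also stores Ry_value on the instance):

    a992 = SteelMaterial()  # defaults: Fy = 50 ksi, Fu = 65 ksi, E = 29000 ksi, Ry = 1.1
    a36 = SteelMaterial(yield_stress=36, ultimate_stress=58, Ry_value=1.5)
    print(a992.Fy, a992.Fu, a992.E)  # attributes set in __init__ above
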
diff --git a/modules/createSAM/RCFIAP/RC_FIAP_main.py b/modules/createSAM/RCFIAP/RC_FIAP_main.py
index 59883b1c0..6ecb991a5 100644
--- a/modules/createSAM/RCFIAP/RC_FIAP_main.py
+++ b/modules/createSAM/RCFIAP/RC_FIAP_main.py
@@ -1,4 +1,4 @@
-# ############################################################### ##
+# ############################################################### ## # noqa: INP001, D100
# RC_FIAP (Reinforced Concrete Frame Inelastic Analysis Platform) ##
# ##
# Developed by: ##
@@ -24,7 +24,7 @@
# Definition of units
m = 1.0 # define basic units -- output units
-kN = 1.0 # define basic units -- output units
+kN = 1.0 # define basic units -- output units # noqa: N816
sec = 1.0 # define basic units -- output units
mm = m / 1000.0 # define engineering units
cm = m / 100.0
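
The constants above implement the usual unit-system idiom: every quantity is stored in the base m-kN-s system, so multiplying by a unit converts in and dividing by one converts back out. A two-line sketch reusing the constants defined above:

    span = 450.0 * cm   # stored internally as 4.5 (metres, the base length unit)
    print(span / mm)    # 4500.0: the same length read back in millimetres
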
@@ -50,21 +50,21 @@
np.set_printoptions(precision=6)
-class BeamElasticElement:
+class BeamElasticElement: # noqa: D101
def __init__(
self,
- EleTag,
- Nod_ini,
- Nod_end,
- AEle,
- EcEle,
- IzEle,
- LEle,
- BEle,
- HEle,
- ElegTr,
- RZi,
- RZe,
+ EleTag, # noqa: N803
+ Nod_ini, # noqa: N803
+ Nod_end, # noqa: N803
+ AEle, # noqa: N803
+ EcEle, # noqa: N803
+ IzEle, # noqa: N803
+ LEle, # noqa: N803
+ BEle, # noqa: N803
+ HEle, # noqa: N803
+ ElegTr, # noqa: N803
+ RZi, # noqa: N803
+ RZe, # noqa: N803
):
self.EleTag = EleTag
self.Nod_ini = Nod_ini
@@ -80,30 +80,30 @@ def __init__(
self.RZe = RZe
-class BeamDesing:
- def __init__(
+class BeamDesing: # noqa: D101
+ def __init__( # noqa: PLR0913
self,
- EleTag,
+ EleTag, # noqa: N803
b,
h,
- Ast1,
+ Ast1, # noqa: N803
dt1,
- Mn_n1,
- Asb1,
+ Mn_n1, # noqa: N803
+ Asb1, # noqa: N803
db1,
- Mn_p1,
+ Mn_p1, # noqa: N803
ns1,
ss1,
- Ast2,
+ Ast2, # noqa: N803
dt2,
- Mn_n2,
- Asb2,
+ Mn_n2, # noqa: N803
+ Asb2, # noqa: N803
db2,
- Mn_p2,
+ Mn_p2, # noqa: N803
ns2,
ss2,
- Nod_ini,
- Nod_end,
+ Nod_ini, # noqa: N803
+ Nod_end, # noqa: N803
db_t1,
db_b1,
db_t2,
@@ -136,30 +136,30 @@ def __init__(
self.db_b2 = db_b2
-class ColDesing:
- def __init__(
+class ColDesing: # noqa: D101
+ def __init__( # noqa: PLR0913
self,
- EleTag,
+ EleTag, # noqa: N803
b,
h,
- nbH,
- nbB,
+ nbH, # noqa: N803
+ nbB, # noqa: N803
db,
- As,
- Pu_v,
- Mu_v,
- fiPn,
- fiMn,
- Mn_i,
+ As, # noqa: N803
+ Pu_v, # noqa: N803
+ Mu_v, # noqa: N803
+ fiPn, # noqa: N803
+ fiMn, # noqa: N803
+ Mn_i, # noqa: N803
d,
dist,
ro,
- Mu_i,
+ Mu_i, # noqa: N803
sst,
- nsB,
- nsH,
- Nod_ini,
- Nod_end,
+ nsB, # noqa: N803
+ nsH, # noqa: N803
+ Nod_ini, # noqa: N803
+ Nod_end, # noqa: N803
):
self.EleTag = EleTag
self.b = b
@@ -184,8 +184,8 @@ def __init__(
self.Nod_end = Nod_end
-class DuctilityCurve:
- def __init__(self, xi, xe, yi, ye, CD_i, CD_e):
+class DuctilityCurve: # noqa: D101
+ def __init__(self, xi, xe, yi, ye, CD_i, CD_e): # noqa: N803
self.xi = xi
self.xe = xe
self.yi = yi
@@ -194,24 +194,24 @@ def __init__(self, xi, xe, yi, ye, CD_i, CD_e):
self.CD_e = CD_e
-class TclLogger:
+class TclLogger: # noqa: D101
def __init__(self):
self.list_of_lines = [
'# This is an autogenerated .tcl file from SimCenter workflow'
]
# Add a string line to the output file
- def add_line(self, line, addNewLine=True):
- if addNewLine == True:
+ def add_line(self, line, addNewLine=True): # noqa: FBT002, N803, D102
+ if addNewLine == True: # noqa: E712
self.list_of_lines.append(line + '\n')
else:
self.list_of_lines.append(line)
    # Convenience function to create a line from an array of inputs to an openseespy function
- def add_array(self, line, addNewLine=True):
- outLine = ''
+ def add_array(self, line, addNewLine=True): # noqa: FBT002, N803, D102
+ outLine = '' # noqa: N806
for item in line:
- outLine += str(item) + ' '
+ outLine += str(item) + ' ' # noqa: N806
# # Remove the last space
# outLine = outLine.rstrip()
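
The manual accumulator above (and the rstrip left commented out) can be replaced by str.join, which never produces the trailing space in the first place. A sketch of the equivalent as a free function; `join_array` is a hypothetical name:

    def join_array(items):
        """Space-join the stringified items, with no trailing space."""
        return ' '.join(str(item) for item in items)

    # logger.add_line(join_array(['node', 1, 0.0, 0.0]))  # hypothetical usage
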
@@ -221,17 +221,17 @@ def add_array(self, line, addNewLine=True):
self.add_line(outLine, addNewLine)
# Save the output file
- def save_as_file(self):
+ def save_as_file(self): # noqa: D102
# Get the current directory
- workingDirectory = os.getcwd()
+ workingDirectory = os.getcwd() # noqa: PTH109, N806
- pathFile = os.path.join(workingDirectory, 'Model.tcl')
+ pathFile = os.path.join(workingDirectory, 'Model.tcl') # noqa: PTH118, N806
- if os.path.exists(pathFile):
- os.remove(pathFile)
+ if os.path.exists(pathFile): # noqa: PTH110
+ os.remove(pathFile) # noqa: PTH107
- with open(pathFile, 'a+') as file_object:
- appendEOL = False
+ with open(pathFile, 'a+') as file_object: # noqa: PTH123
+ appendEOL = False # noqa: N806
# Move read cursor to the start of file.
file_object.seek(0)
@@ -239,93 +239,93 @@ def save_as_file(self):
data = file_object.read(100)
if len(data) > 0:
- appendEOL = True
+ appendEOL = True # noqa: N806
# Iterate over each string in the list
for line in self.list_of_lines:
            # If the file is not empty, append '\n' before the first line;
            # for all other lines, always append '\n' before writing the line
- if appendEOL == True:
+ if appendEOL == True: # noqa: E712
file_object.write('\n')
else:
- appendEOL = True
+ appendEOL = True # noqa: N806
# Append element at the end of file
file_object.write(line)
# print(self.list_of_lines)
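
The PTH110/PTH107/PTH123 suppressions above mark the os.path calls that pathlib replaces. Since the buffered lines already carry their newlines from add_line, the remove/append/seek bookkeeping can collapse to a single write; a sketch, equivalent up to the inter-line newline handling:

    from pathlib import Path

    def save_lines(list_of_lines, filename='Model.tcl'):
        """Overwrite filename in the current directory with the buffered lines."""
        Path.cwd().joinpath(filename).write_text(''.join(list_of_lines))
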
-def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV):
+def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV): # noqa: ARG001, N802, N803, D103
# Get the current directory
- workingDirectory = os.getcwd()
+ workingDirectory = os.getcwd() # noqa: PTH109, N806, F841
- rootSIM = {}
+ rootSIM = {} # noqa: N806
# Try to open the BIM json
- with open(BIM_file, encoding='utf-8') as f:
- rootBIM = json.load(f)
+ with open(BIM_file, encoding='utf-8') as f: # noqa: PTH123
+ rootBIM = json.load(f) # noqa: N806
try:
# rootSIM = rootBIM['StructuralInformation']
- rootSIM = rootBIM['Modeling']
+ rootSIM = rootBIM['Modeling'] # noqa: N806
# KZ: append simulation attribute
rootSIM['Simulation'] = rootBIM.get('Simulation', None)
- except:
- raise ValueError('RC_FIAP - structural information missing')
+ except: # noqa: E722
+ raise ValueError('RC_FIAP - structural information missing') # noqa: B904, EM101, TRY003
# Get the random variables from the input file
try:
- rootRV = rootBIM['randomVariables']
- except:
- raise ValueError('RC_FIAP - randomVariables section missing')
+ rootRV = rootBIM['randomVariables'] # noqa: N806
+ except: # noqa: E722
+ raise ValueError('RC_FIAP - randomVariables section missing') # noqa: B904, EM101, TRY003
- RV_ARRAY = {}
+ RV_ARRAY = {} # noqa: N806
# Populate the RV array with name/value pairs.
# If a random variable is used here, the RV array will contain its current value
for rv in rootRV:
# Try to get the name and value of the random variable
- rvName = rv['name']
- curVal = rv['value']
+ rvName = rv['name'] # noqa: N806
+ curVal = rv['value'] # noqa: N806
        # Check if the current value is a realization of an RV, i.e., is not an RV label
# If so, then set the current value as the mean
if 'RV' in str(curVal):
- curVal = float(rv['mean'])
+ curVal = float(rv['mean']) # noqa: N806
RV_ARRAY[rvName] = curVal
# *********************** Design Starts Here *************************
# if getRV == "False":
if getRV is False:
- print('Running seismic design in FIAP')
+ print('Running seismic design in FIAP') # noqa: T201
# Create the tcl output logger
- outputLogger = TclLogger()
+ outputLogger = TclLogger() # noqa: N806
outputLogger.add_line(
'# Reinforced Concrete Frame Inelastic Analysis Platform (RCFIAP)',
- False,
+ False, # noqa: FBT003
)
outputLogger.add_line(
'# Developed by Victor Ceballos & Carlos Arteta',
- False,
+ False, # noqa: FBT003
)
outputLogger.add_line(
'# Modified by Stevan Gavrilovic - NHERI SimCenter for use in EE-UQ'
)
# Create a class object
- RCDes = RCFIAP()
+ RCDes = RCFIAP() # noqa: N806
- print('Starting seismic design')
+ print('Starting seismic design') # noqa: T201
# Run the building design
RCDes.Design(rootSIM)
- print('Creating nonlinear model')
+ print('Creating nonlinear model') # noqa: T201
# Run a pushover analysis - for testing to compare with original code
- doPushover = False
+ doPushover = False # noqa: N806
# Create the nonlinear model
RCDes.CreateNLM(rootSIM, outputLogger, doPushover)
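
The B904 suppressions above acknowledge a re-raise without exception chaining; the form that B904 and EM101 point to names the message first and chains the KeyError the dict lookup actually raises. A sketch, with a stand-in dict:

    rootBIM = {}  # stand-in for the BIM json loaded above

    try:
        rootSIM = rootBIM['Modeling']
    except KeyError as exc:
        msg = 'RC_FIAP - structural information missing'
        raise ValueError(msg) from exc
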
@@ -333,12 +333,12 @@ def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV):
# Save the output file from the logger
outputLogger.save_as_file()
- if doPushover == True:
- print('Running pushover analysis')
+ if doPushover == True: # noqa: E712
+ print('Running pushover analysis') # noqa: T201
RCDes.Pushover(rootSIM)
# Now create the SAM file for export
- root_SAM = {}
+ root_SAM = {} # noqa: N806
root_SAM['mainScript'] = 'Model.tcl'
root_SAM['type'] = 'OpenSeesInput'
@@ -356,18 +356,18 @@ def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV):
root_SAM['ndf'] = 3
# The number of stories
- vecHeights = rootSIM['VecStoryHeights']
- vecHeights = vecHeights.split(',')
- vecHeights = np.array(vecHeights, dtype=float)
+ vecHeights = rootSIM['VecStoryHeights'] # noqa: N806
+ vecHeights = vecHeights.split(',') # noqa: N806
+ vecHeights = np.array(vecHeights, dtype=float) # noqa: N806
- numStories = len(vecHeights)
+ numStories = len(vecHeights) # noqa: N806
root_SAM['numStory'] = numStories
# The number of spans
- vecSpans = rootSIM['VecSpans']
- vecSpans = vecSpans.split(',')
- vecSpans = np.array(vecSpans, dtype=float)
- numSpans = len(vecSpans)
+ vecSpans = rootSIM['VecSpans'] # noqa: N806
+ vecSpans = vecSpans.split(',') # noqa: N806
+ vecSpans = np.array(vecSpans, dtype=float) # noqa: N806
+ numSpans = len(vecSpans) # noqa: N806
# Get the node mapping
# Consider a structure with 3 stories and 2 spans
@@ -383,15 +383,15 @@ def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV):
# | | |
# #0 #1 #2
- clineOffset = 0
+ clineOffset = 0 # noqa: N806
if numSpans > 1:
- clineOffset = int(numSpans / 2)
+ clineOffset = int(numSpans / 2) # noqa: N806
node_map = []
# Using nodes on column #1 to calculate story drift
for i in range(numStories + 1):
- nodeTag = i * (numSpans + 1)
+ nodeTag = i * (numSpans + 1) # noqa: N806
# Create the node and add it to the node mapping array
node_entry = {}
@@ -409,34 +409,34 @@ def runBuildingDesign(BIM_file, EVENT_file, SAM_file, getRV):
root_SAM['NodeMapping'] = node_map
- with open(SAM_file, 'w') as f:
+ with open(SAM_file, 'w') as f: # noqa: PTH123
json.dump(root_SAM, f, indent=2)
# Main functionality
-class RCFIAP:
- def Design(self, rootSIM):
- def __init__(rootSIM):
+class RCFIAP: # noqa: D101
+ def Design(self, rootSIM): # noqa: C901, N802, N803, D102, PLR0915
+ def __init__(rootSIM): # noqa: N803, N807
self.rootSIM = rootSIM
- global Loc_span, Loc_heigth, ListNodes, Elements, DataBeamDesing, DataColDesing, WDL, WLL, WDLS, Wtotal, cover
+ global Loc_span, Loc_heigth, ListNodes, Elements, DataBeamDesing, DataColDesing, WDL, WLL, WDLS, Wtotal, cover # noqa: PLW0603
        # Function: Reads beam design data from the table that lets the user modify the default design in TAB2 of the GUI
def data_beams_table(self):
self.registros_beams = []
- for DB in DataBeamDesing:
+ for DB in DataBeamDesing: # noqa: N806
b = DB.b / cm
h = DB.h / cm
- L_As_top = DB.Ast1 / cm**2
- L_As_bot = DB.Asb1 / cm**2
- R_As_top = DB.Ast2 / cm**2
- R_As_bot = DB.Asb2 / cm**2
- L_Leg_n = DB.ns1
- R_Leg_n = DB.ns2
- L_Sstirrup = DB.ss1 / cm
- R_Sstirrup = DB.ss2 / cm
- registro = RegistroBeams(
+ L_As_top = DB.Ast1 / cm**2 # noqa: N806
+ L_As_bot = DB.Asb1 / cm**2 # noqa: N806
+ R_As_top = DB.Ast2 / cm**2 # noqa: N806
+ R_As_bot = DB.Asb2 / cm**2 # noqa: N806
+ L_Leg_n = DB.ns1 # noqa: N806
+ R_Leg_n = DB.ns2 # noqa: N806
+ L_Sstirrup = DB.ss1 / cm # noqa: N806
+ R_Sstirrup = DB.ss2 / cm # noqa: N806
+ registro = RegistroBeams( # noqa: F821
DB.EleTag,
b,
h,
@@ -455,16 +455,16 @@ def data_beams_table(self):
def data_columns_table(self):
self.registros_cols = []
- for DC in DataColDesing:
+ for DC in DataColDesing: # noqa: N806
b = DC.b / cm
h = DC.h / cm
db = DC.db / mm
- nbH = DC.nbH
- nbB = DC.nbB
- nsH = DC.nsH
- nsB = DC.nsB
+ nbH = DC.nbH # noqa: N806
+ nbB = DC.nbB # noqa: N806
+ nsH = DC.nsH # noqa: N806
+ nsB = DC.nsB # noqa: N806
sst = DC.sst / cm
- registro = RegistroColumns(
+ registro = RegistroColumns( # noqa: F821
DC.EleTag, b, h, db, nbH, nbB, nsH, nsB, sst
)
self.registros_cols.append(registro)
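
The F821 suppressions on RegistroBeams and RegistroColumns mean those names are undefined in this module (they belonged to the original RC_FIAP GUI), so data_beams_table and data_columns_table would raise NameError if ever called. One defensive option, sketched with a hypothetical module name:

    try:
        from rcfiap_gui import RegistroBeams, RegistroColumns  # hypothetical GUI module
    except ImportError:
        RegistroBeams = RegistroColumns = None  # headless run: table helpers unavailable
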
@@ -472,24 +472,24 @@ def data_columns_table(self):
        # Compression block parameter beta1 as a function of f'c
def beta1(fc):
if fc <= 28 * MPa:
- Beta1 = 0.85
+ Beta1 = 0.85 # noqa: N806
else:
- Beta1 = max([0.85 - 0.05 * (fc - 28.0) / 7.0, 0.65])
+ Beta1 = max([0.85 - 0.05 * (fc - 28.0) / 7.0, 0.65]) # noqa: N806
return Beta1
# Design load combinations
- def Combo_ACI(DL, LL, E):
- U1 = 1.2 * DL + 1.6 * LL
- U2 = 1.2 * DL + 1.0 * LL + 1.0 * E
- U3 = 1.2 * DL + 1.0 * LL - 1.0 * E
- U4 = 0.9 * DL + 1.0 * E
- U5 = 0.9 * DL - 1.0 * E
+ def Combo_ACI(DL, LL, E): # noqa: N802, N803
+ U1 = 1.2 * DL + 1.6 * LL # noqa: N806
+ U2 = 1.2 * DL + 1.0 * LL + 1.0 * E # noqa: N806
+ U3 = 1.2 * DL + 1.0 * LL - 1.0 * E # noqa: N806
+ U4 = 0.9 * DL + 1.0 * E # noqa: N806
+ U5 = 0.9 * DL - 1.0 * E # noqa: N806
return U1, U2, U3, U4, U5
        # Beam flexural design
- def AsBeam(Mu, EleTag):
+ def AsBeam(Mu, EleTag): # noqa: N802, N803
b, h = BBeam, HBeam
- Mu = abs(Mu)
+ Mu = abs(Mu) # noqa: N806
db_v = np.array([4, 5, 6, 7, 8, 10])
for ndb in db_v:
db = ndb / 8.0 * inch
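
beta1 above is the Whitney stress-block factor of ACI 318 and Combo_ACI evaluates the five strength-design combinations. A standalone sketch of both for reference, with fc taken in MPa:

    def beta1(fc_mpa):
        """Whitney stress-block factor (0.65 <= beta1 <= 0.85)."""
        if fc_mpa <= 28.0:
            return 0.85
        return max(0.85 - 0.05 * (fc_mpa - 28.0) / 7.0, 0.65)

    def combo_aci(dl, ll, e):
        """The five factored load combinations used above."""
        return (
            1.2 * dl + 1.6 * ll,
            1.2 * dl + 1.0 * ll + 1.0 * e,
            1.2 * dl + 1.0 * ll - 1.0 * e,
            0.9 * dl + 1.0 * e,
            0.9 * dl - 1.0 * e,
        )

    assert beta1(21.0) == 0.85
    assert abs(beta1(35.0) - 0.80) < 1e-9
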
@@ -507,32 +507,32 @@ def AsBeam(Mu, EleTag):
)
)
ro_req = max(ro_req, ro_min_b)
- As_req = ro_req * b * d
- Ab = pi * db**2 / 4.0
+ As_req = ro_req * b * d # noqa: N806
+ Ab = pi * db**2 / 4.0 # noqa: N806
nb = max(2.0, ceil(As_req / Ab))
- As_con = nb * Ab
+ As_con = nb * Ab # noqa: N806
slb = (b - 2 * cover - 2 * dst - nb * db) / (
nb - 1.0
                ) # clear spacing between bars
if slb >= max(1.0 * inch, db):
break
if ro_req > ro_max_b:
- print(
+ print( # noqa: T201
'Steel percentage greater than the maximum in Beam '
+ str(EleTag)
)
if slb < min(1.0 * inch, db):
- print('Bar separation is not ok in Beam ' + str(EleTag))
+ print('Bar separation is not ok in Beam ' + str(EleTag)) # noqa: T201
a = fy * As_con / 0.85 / fcB / b
- Mn = fy * As_con * (d - a / 2.0)
+ Mn = fy * As_con * (d - a / 2.0) # noqa: N806
return As_con, d, Mn, db
        # Beam shear design
- def AvBeam(Vu, db, d, EleTag):
- Vc = 0.17 * sqrt(fcB / 1000.0) * MPa * BBeam * d
- Vs = (Vu - 0.75 * Vc) / 0.75
+ def AvBeam(Vu, db, d, EleTag): # noqa: N802, N803
+ Vc = 0.17 * sqrt(fcB / 1000.0) * MPa * BBeam * d # noqa: N806
+ Vs = (Vu - 0.75 * Vc) / 0.75 # noqa: N806
if Vs > 4.0 * Vc:
- print('reshape by shear in Beam ' + str(EleTag))
+ print('reshape by shear in Beam ' + str(EleTag)) # noqa: T201
se_1 = min(d / 4.0, 8.0 * db, 24.0 * dst, 300.0 * mm)
            nr_v = np.array([2, 3, 4]) # vector of stirrup leg counts
if Vs <= 0.0:
@@ -540,50 +540,50 @@ def AvBeam(Vu, db, d, EleTag):
nra = 2.0
else:
for nra in nr_v:
-                    Ave = Ast * nra # stirrup cross-sectional area
+                    Ave = Ast * nra # stirrup cross-sectional area # noqa: N806
se_2 = Ave * fy * d / Vs
se = min(se_1, se_2)
if se >= 60.0 * mm:
break
se = floor(se / cm) * cm
if se < 60.0 * mm:
- print('Stirrup spacing is less than 6 cm in beam ' + str(EleTag))
+ print('Stirrup spacing is less than 6 cm in beam ' + str(EleTag)) # noqa: T201
return nra, se
        # Column P-M design
- def AsColumn():
+ def AsColumn(): # noqa: C901, N802
verif = False
- while verif == False:
+ while verif == False: # noqa: E712
for ndb in db_v:
db = ndb / 8.0 * inch
- Ab = pi * db**2.0 / 4.0
+ Ab = pi * db**2.0 / 4.0 # noqa: N806
dp = cover + dst + 0.5 * db
d = h - dp
- for nbH in nbH_v:
- for nbB in nbB_v:
+ for nbH in nbH_v: # noqa: N806
+ for nbB in nbB_v: # noqa: N806
                        # total number of bars
- nbT = 2.0 * (nbB + nbH - 2.0)
- Ast = nbT * Ab
+ nbT = 2.0 * (nbB + nbH - 2.0) # noqa: N806
+ Ast = nbT * Ab # noqa: N806
ro = Ast / b / h
- As = np.hstack(
+ As = np.hstack( # noqa: N806
[nbB * Ab, np.ones(nbH - 2) * 2 * Ab, nbB * Ab]
)
dist = np.linspace(dp, h - dp, nbH)
if ro >= ro_min:
- Pn_max = 0.80 * (
+ Pn_max = 0.80 * ( # noqa: N806
0.85 * fcC * (b * h - Ast) + fy * Ast
)
- Tn_max = -fy * Ast
+ Tn_max = -fy * Ast # noqa: N806
c = np.linspace(1.1 * h / npts, 1.1 * h, npts)
a = Beta1C * c
- Pconc = 0.85 * fcC * a * b
- Mconc = Pconc * (h - a) / 2.0
+ Pconc = 0.85 * fcC * a * b # noqa: N806
+ Mconc = Pconc * (h - a) / 2.0 # noqa: N806
et = ecu * (d - c) / c
fiv = np.copy(et)
- fiv = np.where(fiv >= 0.005, 0.9, fiv)
- fiv = np.where(fiv <= 0.002, 0.65, fiv)
+ fiv = np.where(fiv >= 0.005, 0.9, fiv) # noqa: PLR2004
+ fiv = np.where(fiv <= 0.002, 0.65, fiv) # noqa: PLR2004
fiv = np.where(
- (fiv > 0.002) & (fiv < 0.005),
+ (fiv > 0.002) & (fiv < 0.005), # noqa: PLR2004
(0.65 + 0.25 * (fiv - 0.002) / 0.003),
fiv,
)
@@ -592,9 +592,9 @@ def AsColumn():
fs = Es * es
fs = np.where(fs > fy, fy, fs)
fs = np.where(fs < -fy, -fy, fs)
- Pacer = np.sum(fs * As, axis=1)
- Macer = np.sum(fs * As * (h / 2.0 - dist), axis=1)
- Pn = np.hstack(
+ Pacer = np.sum(fs * As, axis=1) # noqa: N806
+ Macer = np.sum(fs * As * (h / 2.0 - dist), axis=1) # noqa: N806
+ Pn = np.hstack( # noqa: N806
[
Tn_max,
np.where(
@@ -605,22 +605,22 @@ def AsColumn():
Pn_max,
]
)
- Mn = np.hstack([0, Mconc + Macer, 0])
+ Mn = np.hstack([0, Mconc + Macer, 0]) # noqa: N806
fiv = np.hstack([0.9, fiv, 0.65])
- fiPn = fiv * Pn
- fiMn = fiv * Mn
+ fiPn = fiv * Pn # noqa: N806
+ fiMn = fiv * Mn # noqa: N806
if np.all((Pu_v >= min(fiPn)) & (Pu_v <= max(fiPn))):
- Mu_i = np.interp(Pu_v, fiPn, fiMn)
- Mn_i = np.interp(Pu_v, Pn, Mn)
- if np.all(Mu_i >= Mu_v) == True:
+ Mu_i = np.interp(Pu_v, fiPn, fiMn) # noqa: N806
+ Mn_i = np.interp(Pu_v, Pn, Mn) # noqa: N806
+ if np.all(Mu_i >= Mu_v) == True: # noqa: E712
verif = True
break
- if verif == True:
+ if verif == True: # noqa: E712
break
- if verif == True:
+ if verif == True: # noqa: E712
break
if ndb == db_v[-1] and ro > ro_max:
- print(
+ print( # noqa: T201
'column '
+ str(EleTag)
+ 'needs to be resized by reinforcement ratio'
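
The acceptance test inside AsColumn reduces to interpolating the phi-reduced interaction diagram at each factored axial load; np.interp needs fiPn ascending, which holds because the curve is assembled from Tn_max (tension) up to Pn_max (compression). A standalone sketch, with a hypothetical function name:

    import numpy as np

    def section_is_feasible(Pu_v, Mu_v, fiPn, fiMn):
        """True if every factored (Pu, Mu) pair falls inside the phi*Pn-phi*Mn curve."""
        if not np.all((Pu_v >= fiPn.min()) & (Pu_v <= fiPn.max())):
            return False  # an axial demand lies outside the diagram
        return bool(np.all(np.interp(Pu_v, fiPn, fiMn) >= np.abs(Mu_v)))
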
@@ -629,38 +629,38 @@ def AsColumn():
return nbH, nbB, db, As, fiPn, fiMn, Mn_i, d, dist, ro, Mu_i
        # Column shear design
- def AvColumn():
+ def AvColumn(): # noqa: N802
fiv = 0.75
- Ag = b * h
+ Ag = b * h # noqa: N806
se_1 = min(
8.0 * db, b / 2.0, h / 2.0, 200.0 * mm
            ) # minimum spacing per ACI 318-19 Sec. 18.4.3.3
dp = cover + dst + db / 2
d = h - dp
- neH = floor(nbH / 2) + 1
- neB = floor(nbB / 2) + 1
+ neH = floor(nbH / 2) + 1 # noqa: N806
+ neB = floor(nbB / 2) + 1 # noqa: N806
- Ash_H = neH * Ast
- Ash_B = neB * Ast
+ Ash_H = neH * Ast # noqa: N806, F841
+ Ash_B = neB * Ast # noqa: N806
- Vc = (0.17 * sqrt(fcC * MPa) + Nu_min / 6 / Ag) * b * d
- Vs = (Vu - fiv * Vc) / fiv
+ Vc = (0.17 * sqrt(fcC * MPa) + Nu_min / 6 / Ag) * b * d # noqa: N806
+ Vs = (Vu - fiv * Vc) / fiv # noqa: N806
if Vs <= 1 / 3 * sqrt(fcC * MPa) * b * d:
- se_1 = se_1
+ se_1 = se_1 # noqa: PLW0127
elif Vs >= 1 / 3 * sqrt(fcC * MPa) * b * d:
se_1 = min(se_1, h / 4)
if Vs > 0.66 * sqrt(fcC * MPa) * b * d:
- print('Resize the column' + str(EleTag) + ' by shear ')
+ print('Resize the column' + str(EleTag) + ' by shear ') # noqa: T201
if Vs <= 0.0:
se = se_1
else:
-                Ave = Ash_B # stirrup cross-sectional area
+                Ave = Ash_B # stirrup cross-sectional area # noqa: N806
se_2 = Ave * fy * d / Vs
se = min([se_1, se_2])
if se < 60.0 * mm:
- print(
+ print( # noqa: T201
'Minimum spacing of stirrups is not met in column ' + str(EleTag)
)
return se, neB, neH
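
AvColumn above sizes the tie spacing from the truss analogy after a detailing cap. The core of it, stripped of the local state, is sketched below with a hypothetical function name and units consistent with the module's base system:

    def stirrup_spacing(Vu, Vc, Ave, fy, d, se_max, fiv=0.75):
        """Tie spacing: detailing cap se_max, then strength if phi*Vc is exceeded."""
        Vs = (Vu - fiv * Vc) / fiv     # shear the transverse steel must carry
        if Vs <= 0.0:
            return se_max              # concrete alone is sufficient
        return min(se_max, Ave * fy * d / Vs)
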
@@ -668,40 +668,40 @@ def AvColumn():
# Input geometric, materials and seismic design parameters from TAB1 of GUI
# Lafg = float(self.ui.Lafg.text())
- Lafg = float(rootSIM['TribLengthGravity'])
+ Lafg = float(rootSIM['TribLengthGravity']) # noqa: N806
# Lafs = float(self.ui.Lafs.text())
- Lafs = float(rootSIM['TribLengthSeismic'])
+ Lafs = float(rootSIM['TribLengthSeismic']) # noqa: N806
# DL = float(self.ui.DL.text())
- DL = float(rootSIM['DeadLoad'])
+ DL = float(rootSIM['DeadLoad']) # noqa: N806
# LL = float(self.ui.LL.text())
- LL = float(rootSIM['LiveLoad'])
+ LL = float(rootSIM['LiveLoad']) # noqa: N806
# HColi = float(self.ui.HColi.text()) # Column inside Depth
- HColi = float(rootSIM['IntColDepth'])
+ HColi = float(rootSIM['IntColDepth']) # noqa: N806
# BColi = float(self.ui.BColi.text()) # Column inside Width
- BColi = float(rootSIM['IntColWidth'])
+ BColi = float(rootSIM['IntColWidth']) # noqa: N806
# HCole = float(self.ui.HCole.text()) # Column outside Depth
- HCole = float(rootSIM['ExtColDepth'])
+ HCole = float(rootSIM['ExtColDepth']) # noqa: N806
# BCole = float(self.ui.BCole.text()) # Column outside Width
- BCole = float(rootSIM['ExtColWidth'])
+ BCole = float(rootSIM['ExtColWidth']) # noqa: N806
# HBeam = float(self.ui.HBeam.text())
- HBeam = float(rootSIM['BeamDepth'])
+ HBeam = float(rootSIM['BeamDepth']) # noqa: N806
# BBeam = float(self.ui.BBeam.text())
- BBeam = float(rootSIM['BeamWidth'])
+ BBeam = float(rootSIM['BeamWidth']) # noqa: N806
# IFC = float(self.ui.InertiaColumnsFactor.text())
- IFC = float(rootSIM['ColIg'])
+ IFC = float(rootSIM['ColIg']) # noqa: N806
# IFB = float(self.ui.InertiaBeamsFactor.text())
- IFB = float(rootSIM['BeamIg'])
+ IFB = float(rootSIM['BeamIg']) # noqa: N806
# heigth_v = self.ui.heigth_v.text()
heigth_v = rootSIM['VecStoryHeights']
@@ -719,28 +719,28 @@ def AvColumn():
fy = float(rootSIM['FySteel']) * MPa
# fcB = float(self.ui.fcB.text()) * MPa
- fcB = float(rootSIM['BeamFpc']) * MPa
+ fcB = float(rootSIM['BeamFpc']) * MPa # noqa: N806
# fcC = float(self.ui.fcC.text()) * MPa
- fcC = float(rootSIM['ColFpc']) * MPa
+ fcC = float(rootSIM['ColFpc']) * MPa # noqa: N806
# R = float(self.ui.R.text())
- R = float(rootSIM['RParam'])
+ R = float(rootSIM['RParam']) # noqa: N806
# Cd = float(self.ui.Cd.text())
- Cd = float(rootSIM['CdParam'])
+ Cd = float(rootSIM['CdParam']) # noqa: N806, F841
# Omo = float(self.ui.Omo.text())
- Omo = float(rootSIM['OmegaParam'])
+ Omo = float(rootSIM['OmegaParam']) # noqa: N806
# Sds = float(self.ui.Sds.text())
- Sds = float(rootSIM['SDSParam'])
+ Sds = float(rootSIM['SDSParam']) # noqa: N806
# Sd1 = float(self.ui.Sd1.text())
- Sd1 = float(rootSIM['SD1Param'])
+ Sd1 = float(rootSIM['SD1Param']) # noqa: N806
# Tl = float(self.ui.Tl.text())
- Tl = float(rootSIM['TLParam'])
+ Tl = float(rootSIM['TLParam']) # noqa: N806
WDL = Lafg * DL
WDLS = Lafs * DL
@@ -762,7 +762,7 @@ def AvColumn():
# print("BBeam: "+str(BBeam))
# print("IFC: "+str(IFC))
# print("IFB: "+str(IFB))
- print('********************fy: ', fy)
+ print('********************fy: ', fy) # noqa: T201
# print("fcB: "+str(fcB))
# print("fcC: "+str(fcC))
# print("R: "+str(R))
@@ -785,59 +785,59 @@ def AvColumn():
yn_vf = np.ravel(yn_v)
num_nodes = len(Loc_span) * len(Loc_heigth)
ListNodes = np.empty([num_nodes, 3])
- nodeTag = 0
+ nodeTag = 0 # noqa: N806
for xn, yn in zip(xn_vf, yn_vf):
ListNodes[nodeTag, :] = [nodeTag, xn, yn]
op.node(nodeTag, xn, yn)
if yn == 0.0:
op.fix(nodeTag, 1, 1, 1)
- nodeTag += 1
+ nodeTag += 1 # noqa: SIM113, N806
for node in ListNodes:
if node[2] > 0.0 and node[1] == 0.0:
- MasterNode = node[0]
+ MasterNode = node[0] # noqa: N806
if node[2] > 0.0 and node[1] != 0.0:
op.equalDOF(int(MasterNode), int(node[0]), 1)
- ListNodesDrift = ListNodes[np.where(ListNodes[:, 1] == 0.0)]
- MassType = '-lMass' # -lMass, -cMass
+ ListNodesDrift = ListNodes[np.where(ListNodes[:, 1] == 0.0)] # noqa: N806
+ MassType = '-lMass' # -lMass, -cMass # noqa: N806
        # Column creation for elastic analysis
op.geomTransf('Linear', 1, '-jntOffset', 0, 0, 0, -HBeam / 2)
op.geomTransf('Linear', 2, '-jntOffset', 0, HBeam / 2, 0, -HBeam / 2)
- AColi = BColi * HColi # cross-sectional area
- ACole = BCole * HCole # cross-sectional area
- EcC = 4700 * sqrt(fcC * MPa)
+ AColi = BColi * HColi # cross-sectional area # noqa: N806
+ ACole = BCole * HCole # cross-sectional area # noqa: N806
+ EcC = 4700 * sqrt(fcC * MPa) # noqa: N806
        # Interior column moment of inertia
- IzColi = 1.0 / 12.0 * BColi * HColi**3
+ IzColi = 1.0 / 12.0 * BColi * HColi**3 # noqa: N806
        # Exterior column moment of inertia
- IzCole = 1.0 / 12.0 * BCole * HCole**3
- EleTag = 1
+ IzCole = 1.0 / 12.0 * BCole * HCole**3 # noqa: N806
+ EleTag = 1 # noqa: N806
Elements = []
- for Nod_ini in range(num_nodes):
+ for Nod_ini in range(num_nodes): # noqa: N806
if ListNodes[Nod_ini, 2] != Loc_heigth[-1]:
- Nod_end = Nod_ini + n_col_axes
+ Nod_end = Nod_ini + n_col_axes # noqa: N806
if ListNodes[Nod_ini, 2] == 0.0:
- gTr = 1
- RZi = 0
- RZe = HBeam / 2
- LCol = ListNodes[Nod_end, 2] - ListNodes[Nod_ini, 2] - RZi - RZe
+ gTr = 1 # noqa: N806
+ RZi = 0 # noqa: N806
+ RZe = HBeam / 2 # noqa: N806
+ LCol = ListNodes[Nod_end, 2] - ListNodes[Nod_ini, 2] - RZi - RZe # noqa: N806
else:
- gTr = 2
- RZi = HBeam / 2
- RZe = HBeam / 2
- LCol = ListNodes[Nod_end, 2] - ListNodes[Nod_ini, 2] - RZi - RZe
+ gTr = 2 # noqa: N806
+ RZi = HBeam / 2 # noqa: N806
+ RZe = HBeam / 2 # noqa: N806
+ LCol = ListNodes[Nod_end, 2] - ListNodes[Nod_ini, 2] - RZi - RZe # noqa: N806
if (
ListNodes[Nod_ini, 1] == 0.0
or ListNodes[Nod_ini, 1] == Loc_span[-1]
):
- BCol, HCol = BCole, HCole
- ACol = ACole
- IzCol = IFC * IzCole
+ BCol, HCol = BCole, HCole # noqa: N806
+ ACol = ACole # noqa: N806
+ IzCol = IFC * IzCole # noqa: N806
else:
- BCol, HCol = BColi, HColi
- ACol = AColi
- IzCol = IFC * IzColi
- MassDens = ACol * GConc / g
+ BCol, HCol = BColi, HColi # noqa: N806
+ ACol = AColi # noqa: N806
+ IzCol = IFC * IzColi # noqa: N806
+ MassDens = ACol * GConc / g # noqa: N806
Elements.append(
BeamElasticElement(
EleTag,
@@ -867,38 +867,38 @@ def AvColumn():
MassDens,
MassType,
)
- EleTag += 1
+ EleTag += 1 # noqa: N806
num_cols = EleTag
        # Beam creation for elastic analysis
op.geomTransf('Linear', 3, '-jntOffset', HColi / 2.0, 0, -HColi / 2.0, 0)
op.geomTransf('Linear', 4, '-jntOffset', HCole / 2.0, 0, -HColi / 2.0, 0)
op.geomTransf('Linear', 5, '-jntOffset', HColi / 2.0, 0, -HCole / 2.0, 0)
- ABeam = BBeam * HBeam
- EcB = 4700 * sqrt(fcB * MPa)
- IzBeam = IFB * BBeam * HBeam**3 / 12
- MassDens = ABeam * GConc / g + WDLS / g
- for Nod_ini in range(num_nodes):
+ ABeam = BBeam * HBeam # noqa: N806
+ EcB = 4700 * sqrt(fcB * MPa) # noqa: N806
+ IzBeam = IFB * BBeam * HBeam**3 / 12 # noqa: N806
+ MassDens = ABeam * GConc / g + WDLS / g # noqa: N806
+ for Nod_ini in range(num_nodes): # noqa: N806
if (
ListNodes[Nod_ini, 1] != Loc_span[-1]
and ListNodes[Nod_ini, 2] != 0.0
):
- Nod_end = Nod_ini + 1
+ Nod_end = Nod_ini + 1 # noqa: N806
if ListNodes[Nod_ini, 1] == 0.0:
- gTr = 4
- RZi = HCole / 2.0
- RZe = HColi / 2.0
- LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe
+ gTr = 4 # noqa: N806
+ RZi = HCole / 2.0 # noqa: N806
+ RZe = HColi / 2.0 # noqa: N806
+ LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe # noqa: N806
elif ListNodes[Nod_ini, 1] == Loc_span[-2]:
- gTr = 5
- RZi = HColi / 2.0
- RZe = HCole / 2.0
- LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe
+ gTr = 5 # noqa: N806
+ RZi = HColi / 2.0 # noqa: N806
+ RZe = HCole / 2.0 # noqa: N806
+ LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe # noqa: N806
else:
- gTr = 3
- RZi = HColi / 2.0
- RZe = HColi / 2.0
- LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe
+ gTr = 3 # noqa: N806
+ RZi = HColi / 2.0 # noqa: N806
+ RZe = HColi / 2.0 # noqa: N806
+ LBeam = ListNodes[Nod_end, 1] - ListNodes[Nod_ini, 1] - RZi - RZe # noqa: N806
Elements.append(
BeamElasticElement(
EleTag,
@@ -928,25 +928,25 @@ def AvColumn():
MassDens,
MassType,
)
- EleTag += 1
+ EleTag += 1 # noqa: N806
num_elems = EleTag
- num_beams = num_elems - num_cols
+ num_beams = num_elems - num_cols # noqa: F841
# Create a Plain load pattern for gravity loading with a Linear TimeSeries
- Pvig = ABeam * GConc
- PColi = AColi * GConc
- PCole = ACole * GConc
+ Pvig = ABeam * GConc # noqa: N806
+ PColi = AColi * GConc # noqa: N806
+ PCole = ACole * GConc # noqa: N806
op.timeSeries('Linear', 1)
op.pattern('Plain', 1, 1)
- for Element in Elements:
+ for Element in Elements: # noqa: N806
if ListNodes[Element.Nod_ini, 1] == ListNodes[Element.Nod_end, 1]:
if (
ListNodes[Element.Nod_ini, 1] == 0.0
or ListNodes[Element.Nod_ini, 1] == Loc_span[-1]
):
- PCol = PCole
+ PCol = PCole # noqa: N806
else:
- PCol = PColi
+ PCol = PColi # noqa: N806
op.eleLoad('-ele', Element.EleTag, '-type', '-beamUniform', 0, -PCol)
if ListNodes[Element.Nod_ini, 2] == ListNodes[Element.Nod_end, 2]:
op.eleLoad(
@@ -960,18 +960,18 @@ def AvColumn():
op.algorithm('Linear')
op.analysis('Static')
op.analyze(1)
- ElemnsForceD = []
- for Element in Elements:
- Forces = op.eleForce(Element.EleTag)
+ ElemnsForceD = [] # noqa: N806
+ for Element in Elements: # noqa: N806
+ Forces = op.eleForce(Element.EleTag) # noqa: N806
Forces.insert(0, Element.EleTag)
ElemnsForceD.append(Forces)
- ElemnsForceD = np.array(ElemnsForceD)
+ ElemnsForceD = np.array(ElemnsForceD) # noqa: N806
Wtotal = np.sum(ElemnsForceD[: len(Loc_span), 2]) * Lafs / Lafg
op.loadConst('-time', 0.0)
op.timeSeries('Linear', 2)
op.pattern('Plain', 2, 1)
- for Element in Elements:
+ for Element in Elements: # noqa: N806
if ListNodes[Element.Nod_ini, 2] == ListNodes[Element.Nod_end, 2]:
op.eleLoad('-ele', Element.EleTag, '-type', '-beamUniform', -WLL)
op.analyze(1)
@@ -999,102 +999,102 @@ def AvColumn():
# self.ui.DataFrame.canvas.draw()
# self.ui.DataFrame.canvas.show()
- ElemnsForceDL = []
- for Element in Elements:
- Forces = op.eleForce(Element.EleTag)
+ ElemnsForceDL = [] # noqa: N806
+ for Element in Elements: # noqa: N806
+ Forces = op.eleForce(Element.EleTag) # noqa: N806
Forces.insert(0, Element.EleTag)
ElemnsForceDL.append(Forces)
- ElemnsForceDL = np.array(ElemnsForceDL)
+ ElemnsForceDL = np.array(ElemnsForceDL) # noqa: N806
# Create a Plain load pattern for seismic loading with a Linear TimeSeries (LLEF)
op.loadConst('-time', 0.0)
- Htotal = Loc_heigth[-1]
- Ct = 0.0466
+ Htotal = Loc_heigth[-1] # noqa: N806
+ Ct = 0.0466 # noqa: N806
x = 0.9
- Ta = Ct * Htotal**x
- print('Ta =', Ta)
- Ie = 1.0
- Ts = Sd1 / Sds
+ Ta = Ct * Htotal**x # noqa: N806
+ print('Ta =', Ta) # noqa: T201
+ Ie = 1.0 # noqa: N806
+ Ts = Sd1 / Sds # noqa: N806
if Ta <= Ts:
- Sa = max(Sds * Ie / R, 0.044 * Sds * Ie, 0.01)
+ Sa = max(Sds * Ie / R, 0.044 * Sds * Ie, 0.01) # noqa: N806
elif Ta <= Tl:
- Sa = max(Sd1 * Ie / Ta / R, 0.044 * Sds * Ie, 0.01)
+ Sa = max(Sd1 * Ie / Ta / R, 0.044 * Sds * Ie, 0.01) # noqa: N806
else:
- Sa = max(Sd1 * Tl * Ie / (Ta**2) / R, 0.044 * Sds * Ie, 0.01)
- if Ta <= 0.5:
+ Sa = max(Sd1 * Tl * Ie / (Ta**2) / R, 0.044 * Sds * Ie, 0.01) # noqa: N806
+ if Ta <= 0.5: # noqa: PLR2004
k = 1.0
- elif Ta <= 2.5:
+ elif Ta <= 2.5: # noqa: PLR2004
k = 0.75 + 0.5 * Ta
else:
k = 2.0
- sumH = np.sum(np.power(Loc_heigth, k))
+ sumH = np.sum(np.power(Loc_heigth, k)) # noqa: N806
op.timeSeries('Linear', 3)
op.pattern('Plain', 3, 1)
- print('Wtotal =', Wtotal)
- Fp = Sa * Wtotal * np.power(Loc_heigth, k) / sumH
- print('FSis =', Fp)
+ print('Wtotal =', Wtotal) # noqa: T201
+ Fp = Sa * Wtotal * np.power(Loc_heigth, k) / sumH # noqa: N806
+ print('FSis =', Fp) # noqa: T201
for fp, ind in zip(Fp, range(len(Loc_heigth))):
op.load(int(ListNodesDrift[ind, 0]), fp, 0.0, 0.0)
- Vbasal = Sa * Wtotal
+ Vbasal = Sa * Wtotal # noqa: N806, F841
op.analyze(1)
- ElemnsForceDLE = []
- for Element in Elements:
- Forces = op.eleForce(Element.EleTag)
+ ElemnsForceDLE = [] # noqa: N806
+ for Element in Elements: # noqa: N806
+ Forces = op.eleForce(Element.EleTag) # noqa: N806
Forces.insert(0, Element.EleTag)
ElemnsForceDLE.append(Forces)
- ElemnsForceDLE = np.array(ElemnsForceDLE)
+ ElemnsForceDLE = np.array(ElemnsForceDLE) # noqa: N806
np.set_printoptions(precision=6)
np.set_printoptions(suppress=True)
# Story drift calculations
- DriftMax = 0.02
- nodesDisp = []
- Id_Node_Drift = ListNodesDrift[:, 0]
- Id_Node_Drift = np.int64(Id_Node_Drift)
- Id_Node_Drift = Id_Node_Drift.tolist()
+ DriftMax = 0.02 # noqa: N806
+ nodesDisp = [] # noqa: N806
+ Id_Node_Drift = ListNodesDrift[:, 0] # noqa: N806
+ Id_Node_Drift = np.int64(Id_Node_Drift) # noqa: N806
+ Id_Node_Drift = Id_Node_Drift.tolist() # noqa: N806
for nodo in Id_Node_Drift:
- nodesDisp.append([nodo, op.nodeDisp(nodo, 1)])
- nodesDisp = np.array(nodesDisp)
+ nodesDisp.append([nodo, op.nodeDisp(nodo, 1)]) # noqa: PERF401
+ nodesDisp = np.array(nodesDisp) # noqa: N806
drift = nodesDisp[1:, 1] - nodesDisp[:-1, 1]
drift_p = np.divide(drift, np.array(heigth_v))
ver_drift = np.where(drift_p < DriftMax, 'ok', 'not ok')
- Id_Floor = np.arange(1, len(Loc_heigth))
+ Id_Floor = np.arange(1, len(Loc_heigth)) # noqa: N806
drift_table = pd.DataFrame(
{'1.Floor': Id_Floor, '2.Drift': drift_p * 100, '3.': ver_drift}
)
- print(drift_table)
+ print(drift_table) # noqa: T201
# Beams and columns design procedures
- Beta1B = beta1(fcB)
+ Beta1B = beta1(fcB) # noqa: N806
cover = 4 * cm
dst = 3 / 8 * inch
-        Ast = pi * dst**2 / 4.0 # cross-sectional area of the stirrup bar
+        Ast = pi * dst**2 / 4.0 # cross-sectional area of the stirrup bar # noqa: N806
ro_max_b = 0.85 * Beta1B * fcB * 3.0 / fy / 8.0 # maximum steel percentage
ro_min_b = max(
0.25 * sqrt(fcB / MPa) * MPa / fy, 1.4 * MPa / fy
) # minimum steel percentage
DataBeamDesing = []
- for Ele, EleForceD, EleForceDL, EleForceDLE in zip(
+ for Ele, EleForceD, EleForceDL, EleForceDLE in zip( # noqa: N806
Elements, ElemnsForceD, ElemnsForceDL, ElemnsForceDLE
):
if ListNodes[Ele.Nod_ini, 2] == ListNodes[Ele.Nod_end, 2]:
- VID = EleForceD[2]
- VIL = EleForceDL[2] - VID
- VIE = EleForceDLE[2] - VID - VIL
- VED = abs(EleForceD[5])
- VEL = abs(EleForceDL[5]) - VED
- VEE = abs(EleForceDLE[5]) - VED - VEL
-
- MID = EleForceD[3] - EleForceD[2] * Ele.RZi
- MIL = EleForceDL[3] - EleForceDL[2] * Ele.RZi - MID
- MIE = EleForceDLE[3] - EleForceDLE[2] * Ele.RZi - MID - MIL
- MED = EleForceD[6] + EleForceD[5] * Ele.RZe
- MEL = EleForceDL[6] + EleForceDL[5] * Ele.RZe - MED
- MEE = EleForceDLE[6] + EleForceDLE[5] * Ele.RZe - MED - MEL
- MED, MEL, MEE = -MED, -MEL, -MEE
- print(
+ VID = EleForceD[2] # noqa: N806
+ VIL = EleForceDL[2] - VID # noqa: N806
+ VIE = EleForceDLE[2] - VID - VIL # noqa: N806
+ VED = abs(EleForceD[5]) # noqa: N806
+ VEL = abs(EleForceDL[5]) - VED # noqa: N806
+ VEE = abs(EleForceDLE[5]) - VED - VEL # noqa: N806
+
+ MID = EleForceD[3] - EleForceD[2] * Ele.RZi # noqa: N806
+ MIL = EleForceDL[3] - EleForceDL[2] * Ele.RZi - MID # noqa: N806
+ MIE = EleForceDLE[3] - EleForceDLE[2] * Ele.RZi - MID - MIL # noqa: N806
+ MED = EleForceD[6] + EleForceD[5] * Ele.RZe # noqa: N806
+ MEL = EleForceDL[6] + EleForceDL[5] * Ele.RZe - MED # noqa: N806
+ MEE = EleForceDLE[6] + EleForceDLE[5] * Ele.RZe - MED - MEL # noqa: N806
+ MED, MEL, MEE = -MED, -MEL, -MEE # noqa: N806
+ print( # noqa: T201
'MID ',
MID,
'MED',
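
The block above is an ASCE 7-style equivalent-lateral-force profile: approximate period Ta = Ct*H^x, a period-dependent Sa, the vertical-distribution exponent k, and story forces proportional to h^k. Condensed into one function (heights in the same length unit as the model; `elf_story_forces` is a hypothetical name):

    import numpy as np

    def elf_story_forces(Sa, Wtotal, level_heights, Ta):
        """Story forces Fp distributed over the levels in proportion to h**k."""
        if Ta <= 0.5:
            k = 1.0
        elif Ta <= 2.5:
            k = 0.75 + 0.5 * Ta
        else:
            k = 2.0
        h_k = np.power(level_heights, k)
        return Sa * Wtotal * h_k / h_k.sum()  # sums to the base shear Sa * Wtotal

    # e.g. elf_story_forces(0.12, 5000.0, np.array([3.0, 6.0, 9.0]), Ta=0.45)
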
@@ -1108,60 +1108,60 @@ def AvColumn():
'MEE',
MEE,
)
- MI1, MI2, MI3, MI4, MI5 = Combo_ACI(MID, MIL, MIE)
- MNU1 = max(
+ MI1, MI2, MI3, MI4, MI5 = Combo_ACI(MID, MIL, MIE) # noqa: N806
+ MNU1 = max( # noqa: N806
[MI1, MI2, MI3, MI4, MI5, 0.0]
                ) # design negative moment at the initial joint
- MPU1 = min(
+ MPU1 = min( # noqa: N806
[MI1, MI2, MI3, MI4, MI5, abs(MNU1) / 3]
                ) # design positive moment at the initial joint
- ME1, ME2, ME3, ME4, ME5 = Combo_ACI(MED, MEL, MEE)
- MNU2 = max(
+ ME1, ME2, ME3, ME4, ME5 = Combo_ACI(MED, MEL, MEE) # noqa: N806
+ MNU2 = max( # noqa: N806
[ME1, ME2, ME3, ME4, ME5, 0.0]
                ) # design negative moment at the end joint
- MPU2 = min(
+ MPU2 = min( # noqa: N806
[ME1, ME2, ME3, ME4, ME5, abs(MNU2) / 3]
                ) # design positive moment at the end joint
- Mmax = max([MNU1, -MPU1, MNU2, -MPU2])
- MNU1 = max([MNU1, Mmax / 5])
- MPU1 = min([MPU1, -Mmax / 5])
- MNU2 = max([MNU2, Mmax / 5])
- MPU2 = min([MPU2, -Mmax / 5])
-
- Ast1, dt1, Mn_N1, db_t1 = AsBeam(MNU1, Ele.EleTag)
- Asb1, db1, Mn_P1, db_b1 = AsBeam(MPU1, Ele.EleTag)
- Ast2, dt2, Mn_N2, db_t2 = AsBeam(MNU2, Ele.EleTag)
- Asb2, db2, Mn_P2, db_b2 = AsBeam(MPU2, Ele.EleTag)
-
- VI1 = 1.2 * VID + 1.6 * VIL
- VI2 = 1.2 * VID + 1.0 * VIL - 1.0 * VIE
- VI3 = 0.9 * VID - 1.0 * VIE
- VI4 = (Mn_P1 + Mn_N2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0
- VI5 = (Mn_N1 + Mn_P2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0
- VI6 = 1.2 * VID + 1.0 * VIL - 2.0 * VIE
- VI7 = 0.9 * VID - 2.0 * VIE
-
- VU1a = max(VI1, VI2, VI3)
- VU1b = max(VI4, VI5)
- VU1c = max(VI6, VI7)
-
- VU1 = max(
+ Mmax = max([MNU1, -MPU1, MNU2, -MPU2]) # noqa: N806
+ MNU1 = max([MNU1, Mmax / 5]) # noqa: N806
+ MPU1 = min([MPU1, -Mmax / 5]) # noqa: N806
+ MNU2 = max([MNU2, Mmax / 5]) # noqa: N806
+ MPU2 = min([MPU2, -Mmax / 5]) # noqa: N806
+
+ Ast1, dt1, Mn_N1, db_t1 = AsBeam(MNU1, Ele.EleTag) # noqa: N806
+ Asb1, db1, Mn_P1, db_b1 = AsBeam(MPU1, Ele.EleTag) # noqa: N806
+ Ast2, dt2, Mn_N2, db_t2 = AsBeam(MNU2, Ele.EleTag) # noqa: N806
+ Asb2, db2, Mn_P2, db_b2 = AsBeam(MPU2, Ele.EleTag) # noqa: N806
+
+ VI1 = 1.2 * VID + 1.6 * VIL # noqa: N806
+ VI2 = 1.2 * VID + 1.0 * VIL - 1.0 * VIE # noqa: N806
+ VI3 = 0.9 * VID - 1.0 * VIE # noqa: N806
+ VI4 = (Mn_P1 + Mn_N2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0 # noqa: N806
+ VI5 = (Mn_N1 + Mn_P2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0 # noqa: N806
+ VI6 = 1.2 * VID + 1.0 * VIL - 2.0 * VIE # noqa: N806
+ VI7 = 0.9 * VID - 2.0 * VIE # noqa: N806
+
+ VU1a = max(VI1, VI2, VI3) # noqa: N806
+ VU1b = max(VI4, VI5) # noqa: N806
+ VU1c = max(VI6, VI7) # noqa: N806
+
+ VU1 = max( # noqa: N806
VU1a, min(VU1b, VU1c)
                ) # design negative shear at the initial joint
- VE1 = 1.2 * VED + 1.6 * VEL
- VE2 = 1.2 * VED + 1.0 * VEL + 1.0 * VEE
- VE3 = 0.9 * VED + 1.0 * VEE
- VE4 = (Mn_P1 + Mn_N2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0
- VE5 = (Mn_N1 + Mn_P2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0
- VE6 = 1.2 * VED + 1.0 * VEL + 2.0 * VEE
- VE7 = 0.9 * VED + 2.0 * VEE
+ VE1 = 1.2 * VED + 1.6 * VEL # noqa: N806
+ VE2 = 1.2 * VED + 1.0 * VEL + 1.0 * VEE # noqa: N806
+ VE3 = 0.9 * VED + 1.0 * VEE # noqa: N806
+ VE4 = (Mn_P1 + Mn_N2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0 # noqa: N806
+ VE5 = (Mn_N1 + Mn_P2) / Ele.LEle + (1.2 * WDL + WLL) * Ele.LEle / 2.0 # noqa: N806
+ VE6 = 1.2 * VED + 1.0 * VEL + 2.0 * VEE # noqa: N806
+ VE7 = 0.9 * VED + 2.0 * VEE # noqa: N806
- VU2a = max(VE1, VE2, VE3)
- VU2b = max(VE4, VE5)
- VU2c = max(VE6, VE7)
+ VU2a = max(VE1, VE2, VE3) # noqa: N806
+ VU2b = max(VE4, VE5) # noqa: N806
+ VU2c = max(VE6, VE7) # noqa: N806
- VU2 = max(
+ VU2 = max( # noqa: N806
VU2a, min(VU2b, VU2c)
                ) # design negative shear at the end joint
@@ -1203,49 +1203,49 @@ def AvColumn():
# Column design procedure
ro_min = 0.01
ro_max = 0.08
- Beta1C = beta1(fcC)
+ Beta1C = beta1(fcC) # noqa: N806
npts = 20
- ncom = 10
+ ncom = 10 # noqa: F841
ecu = 0.003
- Es = 200.0 * GPa
+ Es = 200.0 * GPa # noqa: N806
db_v = np.array(
[5, 6, 7, 8, 9, 10, 11, 14, 18]
        ) # vector of bar diameters
DataColDesing = []
- for Ele, EleForceD, EleForceDL, EleForceDLE in zip(
+ for Ele, EleForceD, EleForceDL, EleForceDLE in zip( # noqa: N806
Elements, ElemnsForceD, ElemnsForceDL, ElemnsForceDLE
):
if ListNodes[Ele.Nod_ini, 1] == ListNodes[Ele.Nod_end, 1]:
- Mn_N_R, Mn_P_R, Mn_N_L, Mn_P_L = 0, 0, 0, 0
- for DB in DataBeamDesing:
+ Mn_N_R, Mn_P_R, Mn_N_L, Mn_P_L = 0, 0, 0, 0 # noqa: N806
+ for DB in DataBeamDesing: # noqa: N806
if Ele.Nod_end == DB.Nod_ini:
- Mn_N_R, Mn_P_R = DB.Mn_n1, DB.Mn_p1
+ Mn_N_R, Mn_P_R = DB.Mn_n1, DB.Mn_p1 # noqa: N806
if Ele.Nod_end == DB.Nod_end:
- Mn_N_L, Mn_P_L = DB.Mn_n2, DB.Mn_p2
- Sum_Mn_B = max(Mn_P_R + Mn_N_L, Mn_N_R + Mn_P_L)
+ Mn_N_L, Mn_P_L = DB.Mn_n2, DB.Mn_p2 # noqa: N806
+ Sum_Mn_B = max(Mn_P_R + Mn_N_L, Mn_N_R + Mn_P_L) # noqa: N806, F841
b, h = Ele.BEle, Ele.HEle
-                nbB = ceil(b * 10) # number of bars along B
-                nbH = ceil(h * 10) # number of bars along H
- D_c = 1.1 * h / npts
- nbH_v = np.array([nbH - 1, nbH, nbH + 1])
- nbB_v = np.array([nbB - 1, nbB, nbB + 1])
-
- MID = EleForceD[3]
- MIL = EleForceDL[3] - MID
- MIE = EleForceDLE[3] - MID - MIL
-
- PID = EleForceD[2]
- PIL = EleForceDL[2] - PID
- PIE = EleForceDLE[2] - PID - PIL
-
- MI1, MI2, MI3, MI4, MI5 = Combo_ACI(MID, MIL, MIE)
- PI1, PI2, PI3, PI4, PI5 = Combo_ACI(PID, PIL, PIE)
-
- MED = -EleForceD[6]
- MEL = -EleForceDL[6] - MED
- MEE = -EleForceDLE[6] - MED - MEL
- print(
+                nbB = ceil(b * 10) # number of bars along B # noqa: N806
+                nbH = ceil(h * 10) # number of bars along H # noqa: N806
+ D_c = 1.1 * h / npts # noqa: N806, F841
+ nbH_v = np.array([nbH - 1, nbH, nbH + 1]) # noqa: N806
+ nbB_v = np.array([nbB - 1, nbB, nbB + 1]) # noqa: N806
+
+ MID = EleForceD[3] # noqa: N806
+ MIL = EleForceDL[3] - MID # noqa: N806
+ MIE = EleForceDLE[3] - MID - MIL # noqa: N806
+
+ PID = EleForceD[2] # noqa: N806
+ PIL = EleForceDL[2] - PID # noqa: N806
+ PIE = EleForceDLE[2] - PID - PIL # noqa: N806
+
+ MI1, MI2, MI3, MI4, MI5 = Combo_ACI(MID, MIL, MIE) # noqa: N806
+ PI1, PI2, PI3, PI4, PI5 = Combo_ACI(PID, PIL, PIE) # noqa: N806
+
+ MED = -EleForceD[6] # noqa: N806
+ MEL = -EleForceDL[6] - MED # noqa: N806
+ MEE = -EleForceDLE[6] - MED - MEL # noqa: N806
+ print( # noqa: T201
'MID ',
MID,
'MED',
@@ -1260,44 +1260,44 @@ def AvColumn():
MEE,
)
- PED = -EleForceD[5]
- PEL = -EleForceDL[5] - PED
- PEE = -EleForceDLE[5] - PED - PEL
+ PED = -EleForceD[5] # noqa: N806
+ PEL = -EleForceDL[5] - PED # noqa: N806
+ PEE = -EleForceDLE[5] - PED - PEL # noqa: N806
- ME1, ME2, ME3, ME4, ME5 = Combo_ACI(MED, MEL, MEE)
- PE1, PE2, PE3, PE4, PE5 = Combo_ACI(PED, PEL, PEE)
+ ME1, ME2, ME3, ME4, ME5 = Combo_ACI(MED, MEL, MEE) # noqa: N806
+ PE1, PE2, PE3, PE4, PE5 = Combo_ACI(PED, PEL, PEE) # noqa: N806
- Nu_min = min([PI2, PI3, PI4, PI5, PE2, PE3, PE4, PE5])
+ Nu_min = min([PI2, PI3, PI4, PI5, PE2, PE3, PE4, PE5]) # noqa: N806
- Pu_v = np.array([PI1, PI2, PI3, PI4, PI5, PE1, PE2, PE3, PE4, PE5])
- Mu_v = np.array([MI1, MI2, MI3, MI4, MI5, ME1, ME2, ME3, ME4, ME5])
- Mu_v = np.absolute(Mu_v)
+ Pu_v = np.array([PI1, PI2, PI3, PI4, PI5, PE1, PE2, PE3, PE4, PE5]) # noqa: N806
+ Mu_v = np.array([MI1, MI2, MI3, MI4, MI5, ME1, ME2, ME3, ME4, ME5]) # noqa: N806
+ Mu_v = np.absolute(Mu_v) # noqa: N806
- nbH, nbB, db, As, fiPn, fiMn, Mn_i, d, dist, ro, Mu_i = AsColumn()
+ nbH, nbB, db, As, fiPn, fiMn, Mn_i, d, dist, ro, Mu_i = AsColumn() # noqa: N806
- VID = EleForceD[1]
- VIL = EleForceDL[1] - VID
- VIE = EleForceDLE[1] - VID - VIL
- VID, VIL, VIE = abs(VID), abs(VIL), abs(VIE)
+ VID = EleForceD[1] # noqa: N806
+ VIL = EleForceDL[1] - VID # noqa: N806
+ VIE = EleForceDLE[1] - VID - VIL # noqa: N806
+ VID, VIL, VIE = abs(VID), abs(VIL), abs(VIE) # noqa: N806
- Mu_is = Mu_i[[1, 2, 3, 4, 6, 7, 8, 9]]
+ Mu_is = Mu_i[[1, 2, 3, 4, 6, 7, 8, 9]] # noqa: N806
                # maximum moment over all seismic combinations
- Mn_max = np.max(Mu_is)
- VI1, VI2, VI3, VI4, VI5 = Combo_ACI(VID, VIL, VIE)
+ Mn_max = np.max(Mu_is) # noqa: N806
+ VI1, VI2, VI3, VI4, VI5 = Combo_ACI(VID, VIL, VIE) # noqa: N806
- VI6 = 2.0 * Mn_max / Ele.LEle
- VI7 = 1.2 * VID + 1.0 * VIL + Omo * VIE
- VI8 = 1.2 * VID + 1.0 * VIL - Omo * VIE
- VI9 = 0.9 * VID + Omo * VIE
- VI10 = 0.9 * VID - Omo * VIE
+ VI6 = 2.0 * Mn_max / Ele.LEle # noqa: N806
+ VI7 = 1.2 * VID + 1.0 * VIL + Omo * VIE # noqa: N806
+ VI8 = 1.2 * VID + 1.0 * VIL - Omo * VIE # noqa: N806
+ VI9 = 0.9 * VID + Omo * VIE # noqa: N806
+ VI10 = 0.9 * VID - Omo * VIE # noqa: N806
- VUa = max([VI1, VI2, VI3, VI4, VI5])
- VUb = VI6
- VUc = max([VI7, VI8, VI9, VI10])
+ VUa = max([VI1, VI2, VI3, VI4, VI5]) # noqa: N806
+ VUb = VI6 # noqa: N806
+ VUc = max([VI7, VI8, VI9, VI10]) # noqa: N806
                # maximum design shear
- Vu = max([VUa, min([VUb, VUc])])
- sst, nsB, nsH = AvColumn()
+ Vu = max([VUa, min([VUb, VUc])]) # noqa: N806
+ sst, nsB, nsH = AvColumn() # noqa: N806
DataColDesing.append(
ColDesing(
Ele.EleTag,
@@ -1329,22 +1329,22 @@ def AvColumn():
# self.ui.tabWidget.setCurrentIndex(1)
# Creation of the nonlinear model
- def CreateNLM(self, rootSIM, outputLogger, preparePushover):
- def __init__(rootSIM):
+ def CreateNLM(self, rootSIM, outputLogger, preparePushover): # noqa: C901, N802, N803, D102, PLR0915
+ def __init__(rootSIM): # noqa: N803, N807
self.rootSIM = rootSIM
self.outputLogger = outputLogger
- global T1m, T2m, EleCol, EleBeam
+ global T1m, T2m, EleCol, EleBeam # noqa: PLW0603
# Validation of beam and column design table data
- def validate_data(self):
+ def validate_data(self): # noqa: ARG001
cover = 4 * cm
dst = 3 / 8 * inch
- for DC in DataColDesing:
+ for DC in DataColDesing: # noqa: N806
dp = cover + dst + 0.5 * DC.db
DC.dist = np.linspace(dp, DC.h - dp, DC.nbH)
- Ab = pi * DC.db**2.0 / 4.0
+ Ab = pi * DC.db**2.0 / 4.0 # noqa: N806
DC.As = np.hstack(
[DC.nbB * Ab, np.ones(DC.nbH - 2) * 2 * Ab, DC.nbB * Ab]
)
@@ -1366,22 +1366,22 @@ def validate_data(self):
def con_inconf_regu():
fpc = -fc
epsc0 = 2 * fpc / Ec
- Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm)
+ Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm) # noqa: N806
epscu = Gfc / (0.6 * fpc * phl) - 0.8 * fpc / Ec + epsc0
fcu = 0.2 * fpc
- lambdaU = 0.10
+ lambdaU = 0.10 # noqa: N806
ft = 0.33 * sqrt(-fpc * MPa)
- Ets = ft / 0.002
+ Ets = ft / 0.002 # noqa: N806
return fpc, epsc0, fcu, epscu, lambdaU, ft, Ets
# Function: Parameters of regularized confined concrete
- def con_conf_regu(b, h, nsB, nsH, sst):
+ def con_conf_regu(b, h, nsB, nsH, sst): # noqa: N803
fpc = -fc
bcx = h - 2.0 * cover - dst
bcy = b - 2.0 * cover - dst
- Asx = nsB * Ast
- Asy = nsH * Ast
- Asvt = Asx + Asy
+ Asx = nsB * Ast # noqa: N806
+ Asy = nsH * Ast # noqa: N806
+ Asvt = Asx + Asy # noqa: N806
flx = Asvt * fy / sst / bcx
fly = Asvt * fy / sst / bcy
slx = bcx / (nsB - 1)
@@ -1394,16 +1394,16 @@ def con_conf_regu(b, h, nsB, nsH, sst):
k1 = 6.7 * (fl_e / 1000.0) ** (-0.17)
fcc = fc + k1 * fl_e
fpcc = -fcc
- Ecc = Ec
- Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm)
- K = k1 * fl_e / fc
+ Ecc = Ec # noqa: N806
+ Gfc = max(2.0 * (-fpc / MPa) * N / mm, 25.0 * N / mm) # noqa: N806
+ K = k1 * fl_e / fc # noqa: N806
epscc0 = eo1 * (1.0 + 5.0 * K)
- Gfcc = 1.7 * Gfc
+ Gfcc = 1.7 * Gfc # noqa: N806
epsccu = Gfcc / (0.6 * fpcc * phl) - 0.8 * fpcc / Ecc + epscc0
fccu = 0.2 * fpcc
- lambdaC = 0.10
+ lambdaC = 0.10 # noqa: N806
ft = 0.33 * sqrt(-fpc * MPa)
- Ets = ft / 0.002
+ Ets = ft / 0.002 # noqa: N806
# print("**** sst",sst)
# print("**** fpc",fpc)
@@ -1439,45 +1439,45 @@ def con_conf_regu(b, h, nsB, nsH, sst):
# Function: Regularized steel parameters
def steel_mat_regu():
- FyTestN4 = 490.0 * MPa
- FsuTestN4 = 630.0 * MPa
- epsuTestN4 = 0.10
- LgageTestN4 = 200.0 * mm
- Es = 200.0 * GPa
- FyPosN4 = FyTestN4
- epsyPosN4 = FyPosN4 / Es
- FyNegN4 = FyTestN4
- epsyNegN4 = FyNegN4 / Es
- FsuPosN4 = FsuTestN4
- epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4)
- bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4))
- epsuNegN4 = min(-epsccu, epsuPosN4)
- bNegN4 = bPosN4
+ FyTestN4 = 490.0 * MPa # noqa: N806
+ FsuTestN4 = 630.0 * MPa # noqa: N806
+ epsuTestN4 = 0.10 # noqa: N806
+ LgageTestN4 = 200.0 * mm # noqa: N806
+ Es = 200.0 * GPa # noqa: N806
+ FyPosN4 = FyTestN4 # noqa: N806
+ epsyPosN4 = FyPosN4 / Es # noqa: N806
+ FyNegN4 = FyTestN4 # noqa: N806
+ epsyNegN4 = FyNegN4 / Es # noqa: N806
+ FsuPosN4 = FsuTestN4 # noqa: N806
+ epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4) # noqa: N806
+ bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4)) # noqa: N806
+ epsuNegN4 = min(-epsccu, epsuPosN4) # noqa: N806
+ bNegN4 = bPosN4 # noqa: N806
# FsuNegN4 = FsuTestN4
- FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4))
- FsrPosN4 = 0.2 * FyPosN4
- epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4
- FsrNegN4 = 0.2 * FsuNegN4
- epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4
- pinchX = 0.2
- pinchY = 0.8
- damage1 = 0.0
- damage2 = 0.0
- beta = 0.0
+ FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4)) # noqa: N806
+ FsrPosN4 = 0.2 * FyPosN4 # noqa: N806
+ epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4 # noqa: N806, F841
+ FsrNegN4 = 0.2 * FsuNegN4 # noqa: N806
+ epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4 # noqa: N806, F841
+ pinchX = 0.2 # noqa: N806, F841
+ pinchY = 0.8 # noqa: N806, F841
+ damage1 = 0.0 # noqa: F841
+ damage2 = 0.0 # noqa: F841
+ beta = 0.0 # noqa: F841
# op.uniaxialMaterial('Hysteretic', Ele.EleTag * 6 + 4 + pos, FyPosN4, epsyPosN4, FsuPosN4, epsuPosN4
# , FsrPosN4, epsrPosN4, -FyNegN4, -epsyNegN4, -FsuNegN4, -epsuNegN4, -FsrNegN4
# , -epsrNegN4, pinchX, pinchY, damage1, damage2, beta)
- SteelN4Mat = Ele.EleTag * 6 + 4 + pos
- SteelMPFTag = 1e6 * SteelN4Mat
- R0 = 20.0
- cR1 = 0.925
- cR2 = 0.15
+ SteelN4Mat = Ele.EleTag * 6 + 4 + pos # noqa: N806
+ SteelMPFTag = 1e6 * SteelN4Mat # noqa: N806
+ R0 = 20.0 # noqa: N806
+ cR1 = 0.925 # noqa: N806
+ cR2 = 0.15 # noqa: N806
a1 = 0.0
a2 = 1.0
a3 = 0.0
a4 = 0.0
- print(
+ print( # noqa: T201
'SteelMPF',
int(SteelMPFTag),
FyPosN4,
@@ -1529,7 +1529,7 @@ def steel_mat_regu():
]
)
- print(
+ print( # noqa: T201
'MinMax',
int(SteelN4Mat),
int(SteelMPFTag),
@@ -1561,13 +1561,13 @@ def steel_mat_regu():
)
# Function: Parameters of non-regularized confined concrete
- def con_conf(b, h, nsB, nsH, sst):
+ def con_conf(b, h, nsB, nsH, sst): # noqa: N803
fpc = -fc
bcx = h - 2.0 * cover - dst
bcy = b - 2.0 * cover - dst
- Asx = nsB * Ast
- Asy = nsH * Ast
- Asvt = Asx + Asy
+ Asx = nsB * Ast # noqa: N806
+ Asy = nsH * Ast # noqa: N806
+ Asvt = Asx + Asy # noqa: N806
flx = Asvt * fy / sst / bcx
fly = Asvt * fy / sst / bcy
slx = bcx / (nsB - 1)
@@ -1580,58 +1580,58 @@ def con_conf(b, h, nsB, nsH, sst):
k1 = 6.7 * (fl_e / 1000.0) ** (-0.17)
fcc = fc + k1 * fl_e
fpcc = -fcc
- K = k1 * fl_e / fc
+ K = k1 * fl_e / fc # noqa: N806
epscc0 = eo1 * (1.0 + 5.0 * K)
rov = Asvt / sst / (bcx + bcy)
e85 = 260 * rov * epscc0 + eo85
epsccu = (e85 - epscc0) * (0.2 * fcc - fcc) / (0.85 * fcc - fcc) + epscc0
fccu = 0.2 * fpcc
- lambdaC = 0.10
+ lambdaC = 0.10 # noqa: N806
ft = 0.33 * sqrt(-fpc * MPa)
- Ets = ft / 0.002
+ Ets = ft / 0.002 # noqa: N806
return fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets
# Function: Parameters of non-regularized steel
def steel_mat():
- FyTestN4 = 490.0 * MPa
- FsuTestN4 = 630.0 * MPa
- epsuTestN4 = 0.10
- LgageTestN4 = phl
- Es = 200.0 * GPa
- FyPosN4 = FyTestN4
- epsyPosN4 = FyPosN4 / Es
- FyNegN4 = FyTestN4
- epsyNegN4 = FyNegN4 / Es
- FsuPosN4 = FsuTestN4
- epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4)
- bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4))
- epsuNegN4 = min(-epsccu, epsuPosN4)
- bNegN4 = bPosN4
+ FyTestN4 = 490.0 * MPa # noqa: N806
+ FsuTestN4 = 630.0 * MPa # noqa: N806
+ epsuTestN4 = 0.10 # noqa: N806
+ LgageTestN4 = phl # noqa: N806
+ Es = 200.0 * GPa # noqa: N806
+ FyPosN4 = FyTestN4 # noqa: N806
+ epsyPosN4 = FyPosN4 / Es # noqa: N806
+ FyNegN4 = FyTestN4 # noqa: N806
+ epsyNegN4 = FyNegN4 / Es # noqa: N806
+ FsuPosN4 = FsuTestN4 # noqa: N806
+ epsuPosN4 = epsyPosN4 + LgageTestN4 / phl * (epsuTestN4 - epsyPosN4) # noqa: N806
+ bPosN4 = (FsuPosN4 - FyPosN4) / (Es * (epsuPosN4 - epsyPosN4)) # noqa: N806
+ epsuNegN4 = min(-epsccu, epsuPosN4) # noqa: N806
+ bNegN4 = bPosN4 # noqa: N806
# FsuNegN4 = FsuTestN4
- FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4))
- FsrPosN4 = 0.2 * FyPosN4
- epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4
- FsrNegN4 = 0.2 * FsuNegN4
- epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4
- pinchX = 0.2
- pinchY = 0.8
- damage1 = 0.0
- damage2 = 0.0
- beta = 0.0
+ FsuNegN4 = FyNegN4 + bNegN4 * (Es * (epsuNegN4 - epsyNegN4)) # noqa: N806
+ FsrPosN4 = 0.2 * FyPosN4 # noqa: N806
+ epsrPosN4 = (FsuPosN4 - FsrPosN4) / Es + 1.05 * epsuPosN4 # noqa: N806, F841
+ FsrNegN4 = 0.2 * FsuNegN4 # noqa: N806
+ epsrNegN4 = (FsuNegN4 - FsrNegN4) / Es + 1.05 * epsuNegN4 # noqa: N806, F841
+ pinchX = 0.2 # noqa: N806, F841
+ pinchY = 0.8 # noqa: N806, F841
+ damage1 = 0.0 # noqa: F841
+ damage2 = 0.0 # noqa: F841
+ beta = 0.0 # noqa: F841
# op.uniaxialMaterial('Hysteretic', Ele.EleTag * 6 + 4 + pos, FyPosN4, epsyPosN4, FsuPosN4, epsuPosN4
# , FsrPosN4, epsrPosN4, -FyNegN4, -epsyNegN4, -FsuNegN4, -epsuNegN4, -FsrNegN4
# , -epsrNegN4, pinchX, pinchY, damage1, damage2, beta)
- SteelN4Mat = Ele.EleTag * 6 + 4 + pos
- SteelMPFTag = 1e6 * SteelN4Mat
- R0 = 20.0
- cR1 = 0.925
- cR2 = 0.15
+ SteelN4Mat = Ele.EleTag * 6 + 4 + pos # noqa: N806
+ SteelMPFTag = 1e6 * SteelN4Mat # noqa: N806
+ R0 = 20.0 # noqa: N806
+ cR1 = 0.925 # noqa: N806
+ cR2 = 0.15 # noqa: N806
a1 = 0.0
a2 = 1.0
a3 = 0.0
a4 = 0.0
- print(
+ print( # noqa: T201
'SteelMPF',
int(SteelMPFTag),
FyPosN4,
@@ -1683,7 +1683,7 @@ def steel_mat():
]
)
- print(
+ print( # noqa: T201
'MinMax',
int(SteelN4Mat),
int(SteelMPFTag),
@@ -1715,7 +1715,7 @@ def steel_mat():
)
# Function: Creation of fibers in beams
- def fiber_beam(Ast, Asb, pos):
+ def fiber_beam(Ast, Asb, pos): # noqa: N803
op.section('Fiber', Ele.EleTag * 2 + pos)
op.patch(
'rect',
@@ -1749,7 +1749,7 @@ def fiber_beam(Ast, Asb, pos):
)
op.patch('rect', Ele.EleTag * 6 + 2 + pos, 2, 1, -y2, -z2, -y2 + dp, z2)
op.patch('rect', Ele.EleTag * 6 + 2 + pos, 2, 1, y2 - dp, -z2, y2, z2)
- print(
+ print( # noqa: T201
'BeamL',
Ele.EleTag * 6 + 4 + pos,
1,
@@ -1769,7 +1769,7 @@ def fiber_beam(Ast, Asb, pos):
y2 - dp,
-z2 + dp,
)
- print(
+ print( # noqa: T201
'BeamR',
Ele.EleTag * 6 + 4 + pos,
1,
@@ -1905,7 +1905,7 @@ def fiber_beam(Ast, Asb, pos):
op.fix(int(node[0]), 1, 1, 1)
outputLogger.add_array(['fix', int(node[0]), 1, 1, 1])
if node[2] > 0 and node[1] == 0:
- MasterNode = node[0]
+ MasterNode = node[0] # noqa: N806
if node[2] > 0 and node[1] != 0:
op.equalDOF(int(MasterNode), int(node[0]), 1)
outputLogger.add_array(
@@ -1914,28 +1914,28 @@ def fiber_beam(Ast, Asb, pos):
cover = 4 * cm
dst = 3 / 8 * inch
-        Ast = pi * dst**2 / 4.0 # cross-sectional area of the stirrup bar
+        Ast = pi * dst**2 / 4.0 # cross-sectional area of the stirrup bar # noqa: N806
        # column creation
# HBeam = float(self.ui.HBeam.text())
- HBeam = float(rootSIM['BeamDepth'])
+ HBeam = float(rootSIM['BeamDepth']) # noqa: N806
# HColi = float(self.ui.HColi.text()) # Column inside Depth
- HColi = float(rootSIM['IntColDepth'])
+ HColi = float(rootSIM['IntColDepth']) # noqa: N806
# HCole = float(self.ui.HCole.text()) # Column outside Depth
- HCole = float(rootSIM['ExtColDepth'])
+ HCole = float(rootSIM['ExtColDepth']) # noqa: N806
# fy = float(self.ui.fy.text()) * MPa
fy = float(rootSIM['FySteel']) * MPa
- Es = 200.0 * GPa
+ Es = 200.0 * GPa # noqa: N806, F841
# fcB = float(self.ui.fcB.text()) * MPa
- fcB = float(rootSIM['BeamFpc']) * MPa
+ fcB = float(rootSIM['BeamFpc']) * MPa # noqa: N806
# fcC = float(self.ui.fcC.text()) * MPa
- fcC = float(rootSIM['ColFpc']) * MPa
+ fcC = float(rootSIM['ColFpc']) * MPa # noqa: N806
op.geomTransf('PDelta', 1, '-jntOffset', 0, 0, 0, -HBeam / 2)
op.geomTransf('PDelta', 2, '-jntOffset', 0, HBeam / 2, 0, -HBeam / 2)
@@ -1996,36 +1996,36 @@ def fiber_beam(Ast, Asb, pos):
EleCol = []
EleBeam = []
- for Ele in Elements:
+ for Ele in Elements: # noqa: N806
if ListNodes[Ele.Nod_ini, 1] == ListNodes[Ele.Nod_end, 1]:
EleCol.append(Ele)
else:
EleBeam.append(Ele)
- platicHingeOpt = int(rootSIM['PlasticHingeOpt'])
- includeRegularization = bool(rootSIM['IncludeRegularization'])
+ platicHingeOpt = int(rootSIM['PlasticHingeOpt']) # noqa: N806
+ includeRegularization = bool(rootSIM['IncludeRegularization']) # noqa: N806
# print("platicHingeOpt",platicHingeOpt)
# print("includeRegularization",includeRegularization)
# Creation of non-linear elements (beams and columns)
- eo1, eo85, eo20, lambdaU = -0.002, -0.0038, -0.006, 0.1
- for Ele, DC in zip(EleCol, DataColDesing):
+ eo1, eo85, eo20, lambdaU = -0.002, -0.0038, -0.006, 0.1 # noqa: N806
+ for Ele, DC in zip(EleCol, DataColDesing): # noqa: N806
outputLogger.add_line(
'# Creating materials and elements for column ' + str(DC.EleTag)
)
- fc, Ec = fcC, Ele.EcEle
+ fc, Ec = fcC, Ele.EcEle # noqa: N806
if platicHingeOpt == 1:
phl = 0.5 * DC.h
- elif platicHingeOpt == 2:
+ elif platicHingeOpt == 2: # noqa: PLR2004
phl = 0.08 * Ele.LEle + 0.022 * fy / MPa * DC.db / mm
- elif platicHingeOpt == 3:
+ elif platicHingeOpt == 3: # noqa: PLR2004
phl = 0.05 * Ele.LEle + 0.1 * fy / MPa * DC.db / mm / sqrt(fc * MPa)
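
Editor's note: the three platicHingeOpt branches appear to match common plastic-hinge-length estimates: half the section depth, the Paulay-Priestley form 0.08L + 0.022 fy db, and the Berry-Lehman-Lowes form 0.05L + 0.1 fy db / sqrt(fc). A standalone sketch of the same choice (hypothetical helper; fy and fc in MPa, lengths in mm, as assumed here):

    from math import sqrt

    def plastic_hinge_length(option, L, h, fy, db, fc):
        """Hinge length for a member of clear length L, depth h, bar
        diameter db (mm) and strengths fy, fc (MPa). Sketch only."""
        if option == 1:
            return 0.5 * h                              # half the member depth
        if option == 2:
            return 0.08 * L + 0.022 * fy * db           # Paulay & Priestley (1992)
        if option == 3:
            return 0.05 * L + 0.1 * fy * db / sqrt(fc)  # Berry et al. (2008)
        raise ValueError('option must be 1, 2 or 3')
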
- if includeRegularization == True:
- fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
- print(
+ if includeRegularization == True: # noqa: E712
+ fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu() # noqa: N806
+ print( # noqa: T201
'Concrete02',
Ele.EleTag * 6,
fpc,
@@ -2088,7 +2088,7 @@ def fiber_beam(Ast, Asb, pos):
]
)
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu( # noqa: N806
DC.b, DC.h, DC.nsB, DC.nsH, DC.sst
)
op.uniaxialMaterial(
@@ -2150,8 +2150,8 @@ def fiber_beam(Ast, Asb, pos):
# No regularization
else:
ft = 0.33 * sqrt(fcC * MPa)
- Ets = ft / 0.002
- print(
+ Ets = ft / 0.002 # noqa: N806
+ print( # noqa: T201
'Concrete02',
Ele.EleTag * 6,
-fcC,
@@ -2214,7 +2214,7 @@ def fiber_beam(Ast, Asb, pos):
]
)
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf( # noqa: N806
DC.b, DC.h, DC.nsB, DC.nsH, DC.sst
)
op.uniaxialMaterial(
@@ -2345,8 +2345,8 @@ def fiber_beam(Ast, Asb, pos):
['patch', 'rect', Ele.EleTag * 6, 2, 1, y1 - dp, -z1, y1, z1]
)
- for dist, As in zip(DC.dist, DC.As):
- print(
+ for dist, As in zip(DC.dist, DC.As): # noqa: N806
+ print( # noqa: T201
'Col ',
Ele.EleTag * 6 + 4,
1,
@@ -2382,7 +2382,7 @@ def fiber_beam(Ast, Asb, pos):
outputLogger.add_line('}')
- MassDens = Ele.AEle * GConc / g
+ MassDens = Ele.AEle * GConc / g # noqa: N806
op.beamIntegration(
'HingeRadau',
Ele.EleTag,
@@ -2406,8 +2406,8 @@ def fiber_beam(Ast, Asb, pos):
MassDens,
)
- intgrStr = (
- '"HingeRadau'
+ intgrStr = ( # noqa: N806
+ '"HingeRadau' # noqa: ISC003
+ ' '
+ str(Ele.EleTag)
+ ' '
@@ -2434,15 +2434,15 @@ def fiber_beam(Ast, Asb, pos):
]
)
- for Ele, DB in zip(EleBeam, DataBeamDesing):
- fc, Ec, nsH = fcB, Ele.EcEle, 2
+ for Ele, DB in zip(EleBeam, DataBeamDesing): # noqa: N806
+ fc, Ec, nsH = fcB, Ele.EcEle, 2 # noqa: N806
if platicHingeOpt == 1:
phl1 = 0.5 * DB.h
phl2 = 0.5 * DB.h
- elif platicHingeOpt == 2:
+ elif platicHingeOpt == 2: # noqa: PLR2004
phl1 = 0.08 * Ele.LEle + 0.022 * fy / MPa * DB.db_t1 / mm
phl2 = 0.08 * Ele.LEle + 0.022 * fy / MPa * DB.db_t2 / mm
- elif platicHingeOpt == 3:
+ elif platicHingeOpt == 3: # noqa: PLR2004
phl1 = 0.05 * Ele.LEle + 0.1 * fy / MPa * DB.db_t1 / mm / sqrt(
fc * MPa
)
@@ -2454,9 +2454,9 @@ def fiber_beam(Ast, Asb, pos):
'# Creating materials and elements for beam ' + str(DB.EleTag)
)
- if includeRegularization == True:
+ if includeRegularization == True: # noqa: E712
phl = phl1
- fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
+ fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu() # noqa: N806
op.uniaxialMaterial(
'Concrete02',
Ele.EleTag * 6,
@@ -2484,7 +2484,7 @@ def fiber_beam(Ast, Asb, pos):
)
phl = phl2
- fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu()
+ fpc, epsc0, fcu, epscu, lambdaU, ft, Ets = con_inconf_regu() # noqa: N806
op.uniaxialMaterial(
'Concrete02',
Ele.EleTag * 6 + 1,
@@ -2512,7 +2512,7 @@ def fiber_beam(Ast, Asb, pos):
)
phl, pos = phl1, 0
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu( # noqa: N806
DB.b, DB.h, DB.ns1, nsH, DB.ss1
)
op.uniaxialMaterial(
@@ -2543,7 +2543,7 @@ def fiber_beam(Ast, Asb, pos):
steel_mat_regu()
phl, pos = phl2, 1
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf_regu( # noqa: N806
DB.b, DB.h, DB.ns2, nsH, DB.ss2
)
op.uniaxialMaterial(
@@ -2576,7 +2576,7 @@ def fiber_beam(Ast, Asb, pos):
# No regularization
else:
ft = 0.33 * sqrt(fcB * MPa)
- Ets = ft / 0.002
+ Ets = ft / 0.002 # noqa: N806
op.uniaxialMaterial(
'Concrete02',
Ele.EleTag * 6,
@@ -2629,7 +2629,7 @@ def fiber_beam(Ast, Asb, pos):
]
)
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf( # noqa: N806
DB.b, DB.h, DB.ns1, nsH, DB.ss1
)
op.uniaxialMaterial(
@@ -2658,7 +2658,7 @@ def fiber_beam(Ast, Asb, pos):
]
)
- fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf(
+ fpcc, epscc0, fccu, epsccu, lambdaC, ft, Ets = con_conf( # noqa: N806
DB.b, DB.h, DB.ns2, nsH, DB.ss2
)
op.uniaxialMaterial(
@@ -2700,7 +2700,7 @@ def fiber_beam(Ast, Asb, pos):
dp = DB.h - min(DB.db2, DB.dt2)
pos = 1
fiber_beam(DB.Ast2, DB.Asb2, pos)
- MassDens = Ele.AEle * GConc / g + WDLS / g
+ MassDens = Ele.AEle * GConc / g + WDLS / g # noqa: N806
op.beamIntegration(
'HingeRadau',
Ele.EleTag,
@@ -2723,8 +2723,8 @@ def fiber_beam(Ast, Asb, pos):
MassDens,
)
- intgrStr = (
- '"HingeRadau'
+ intgrStr = ( # noqa: N806
+ '"HingeRadau' # noqa: ISC003
+ ' '
+ str(Ele.EleTag * 2)
+ ' '
@@ -2753,21 +2753,21 @@ def fiber_beam(Ast, Asb, pos):
list_beams = [Ele.EleTag for Ele in EleBeam]
list_cols = [Ele.EleTag for Ele in EleCol]
- print('list_beams =', list_beams)
- print('list_cols =', list_cols)
+ print('list_beams =', list_beams) # noqa: T201
+ print('list_cols =', list_cols) # noqa: T201
- print('Model Nonlinear Built')
+ print('Model Nonlinear Built') # noqa: T201
# KZ: gravity analysis
outputLogger.add_array(['timeSeries Linear 1'])
outputLogger.add_array(['pattern Plain 1 Constant {'])
- for Ele in EleCol:
+ for Ele in EleCol: # noqa: N806
outputLogger.add_array(
[
f'eleLoad -ele {Ele.EleTag} -type -beamUniform 0 {-Ele.AEle * GConc}'
]
)
- for Ele in EleBeam:
+ for Ele in EleBeam: # noqa: N806
outputLogger.add_array(
[
f'eleLoad -ele {Ele.EleTag} -type -beamUniform {-Ele.AEle * GConc - WDL}'
@@ -2810,11 +2810,11 @@ def fiber_beam(Ast, Asb, pos):
outputLogger.add_array([f'set a1 [expr {xi}*2.0/($lambda1+$lambda2)]'])
outputLogger.add_array(['rayleigh $a0 0.0 $a1 0.0'])
- if preparePushover == False:
+ if preparePushover == False: # noqa: E712
return
- if not os.path.exists('Pushover'):
- os.mkdir('Pushover')
+ if not os.path.exists('Pushover'): # noqa: PTH110
+ os.mkdir('Pushover') # noqa: PTH102
# Recording of forces and deformations from nonlinear analysis
op.recorder(
@@ -2931,16 +2931,16 @@ def fiber_beam(Ast, Asb, pos):
# Create a Plain load pattern for gravity loading with a Linear TimeSeries
op.timeSeries('Linear', 1)
op.pattern('Plain', 1, 1)
- for Ele in EleCol:
+ for Ele in EleCol: # noqa: N806
op.eleLoad(
'-ele', Ele.EleTag, '-type', '-beamUniform', 0, -Ele.AEle * GConc
)
- for Ele in EleBeam:
+ for Ele in EleBeam: # noqa: N806
op.eleLoad(
'-ele', Ele.EleTag, '-type', '-beamUniform', -Ele.AEle * GConc - WDL
)
- Tol = 1.0e-6 # convergence tolerance for test
+ Tol = 1.0e-6 # convergence tolerance for test # noqa: N806
op.constraints('Plain') # how it handles boundary conditions
op.numberer(
'Plain'
@@ -2954,8 +2954,8 @@ def fiber_beam(Ast, Asb, pos):
op.algorithm(
'KrylovNewton'
) # use Newton solution algorithm: updates tangent stiffness at every iteration
- NstepGravity = 10 # apply gravity in 10 steps
- DGravity = 1.0 / NstepGravity # first load increment;
+ NstepGravity = 10 # apply gravity in 10 steps # noqa: N806
+ DGravity = 1.0 / NstepGravity # first load increment; # noqa: N806
op.integrator(
'LoadControl', DGravity
) # determine the next time step for an analysis
@@ -2964,58 +2964,58 @@ def fiber_beam(Ast, Asb, pos):
op.loadConst('-time', 0.0)
# xi = 0.05 # damping ratio
- MpropSwitch = 1.0
- KcurrSwitch = 0.0
- KcommSwitch = 1.0
- KinitSwitch = 0.0
- nEigenI = 1 # mode 1
- nEigenI2 = 2 # mode 2
- nEigenJ = 3 # mode 3
+ MpropSwitch = 1.0 # noqa: N806
+ KcurrSwitch = 0.0 # noqa: N806
+ KcommSwitch = 1.0 # noqa: N806
+ KinitSwitch = 0.0 # noqa: N806
+ nEigenI = 1 # mode 1 # noqa: N806
+ nEigenI2 = 2 # mode 2 # noqa: N806
+ nEigenJ = 3 # mode 3 # noqa: N806
# eigenvalue analysis for nEigenJ modes
- lambdaN = op.eigen(nEigenJ)
- lambdaI = lambdaN[nEigenI - 1] # eigenvalue mode i
- lambdaI2 = lambdaN[nEigenI2 - 1] # eigenvalue mode i2
- lambdaJ = lambdaN[nEigenJ - 1] # eigenvalue mode j
- print('lambdaN ', lambdaN)
- omegaI = pow(lambdaI, 0.5)
- omegaI2 = pow(lambdaI2, 0.5)
- omegaJ = pow(lambdaJ, 0.5)
+ lambdaN = op.eigen(nEigenJ) # noqa: N806
+ lambdaI = lambdaN[nEigenI - 1] # eigenvalue mode i # noqa: N806
+ lambdaI2 = lambdaN[nEigenI2 - 1] # eigenvalue mode i2 # noqa: N806
+ lambdaJ = lambdaN[nEigenJ - 1] # eigenvalue mode j # noqa: N806
+ print('lambdaN ', lambdaN) # noqa: T201
+ omegaI = pow(lambdaI, 0.5) # noqa: N806
+ omegaI2 = pow(lambdaI2, 0.5) # noqa: N806
+ omegaJ = pow(lambdaJ, 0.5) # noqa: N806
T1m = 2.0 * pi / omegaI
T2m = 2.0 * pi / omegaI2
- print('Ta1=', T1m, 'sec', ' Ta2=', T2m, ' sec')
- alphaM = (
+ print('Ta1=', T1m, 'sec', ' Ta2=', T2m, ' sec') # noqa: T201
+ alphaM = ( # noqa: N806
MpropSwitch * xi * (2.0 * omegaI * omegaJ) / (omegaI + omegaJ)
) # M-prop. damping D = alphaM*M
- betaKcurr = (
+ betaKcurr = ( # noqa: N806
KcurrSwitch * 2.0 * xi / (omegaI + omegaJ)
) # current-K +betaKcurr*KCurrent
- betaKcomm = (
+ betaKcomm = ( # noqa: N806
KcommSwitch * 2.0 * xi / (omegaI + omegaJ)
) # last-committed K +betaKcomm*KlastCommitt
- betaKinit = (
+ betaKinit = ( # noqa: N806
KinitSwitch * 2.0 * xi / (omegaI + omegaJ)
) # initial-K +betaKinit*Kinit
op.rayleigh(alphaM, betaKcurr, betaKinit, betaKcomm) # RAYLEIGH damping
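
Editor's note: the four switches merely select which stiffness matrix carries the stiffness-proportional term; with only MpropSwitch and KcommSwitch active, as here, the coefficients reduce to the classical two-mode Rayleigh formulas a0 = 2 xi wi wj / (wi + wj) and a1 = 2 xi / (wi + wj). A self-contained sketch:

    from math import pi

    def rayleigh_coeffs(T_i, T_j, xi):
        """Two-mode Rayleigh damping: C = a0*M + a1*K gives damping
        ratio xi at both anchor periods T_i and T_j (seconds)."""
        w_i, w_j = 2.0 * pi / T_i, 2.0 * pi / T_j
        a0 = 2.0 * xi * w_i * w_j / (w_i + w_j)   # mass-proportional term
        a1 = 2.0 * xi / (w_i + w_j)               # stiffness-proportional term
        return a0, a1

    a0, a1 = rayleigh_coeffs(0.6, 0.1, 0.05)      # illustrative periods, 5% damping
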
# Pushover function
- def Pushover(self, rootSIM):
- def __init__(rootSIM):
+ def Pushover(self, rootSIM): # noqa: C901, N802, N803, D102, PLR0915
+ def __init__(rootSIM): # noqa: N803, N807
self.rootSIM = rootSIM
- global cbar
+ global cbar # noqa: PLW0602
- def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
- IOflag = 2
- testType = 'RelativeNormDispIncr'
+ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps): # noqa: C901, N802, N803
+ IOflag = 2 # noqa: N806
+ testType = 'RelativeNormDispIncr' # noqa: N806
# set testType EnergyIncr; # Don't use with Penalty constraints
# set testType RelativeNormUnbalance; # Don't use with Penalty constraints
# set testType RelativeNormDispIncr; # Don't use with Lagrange constraints
# set testType RelativeTotalNormDispIncr; # Don't use with Lagrange constraints
# set testType RelativeEnergyIncr; # Don't use with Penalty constraints
- tolInit = 1.0e-6 # the initial Tolerance, so it can be referred back to
- iterInit = 50 # the initial Max Number of Iterations
- algorithmType = 'KrylovNewton' # the algorithm type
+ tolInit = 1.0e-6 # the initial Tolerance, so it can be referred back to # noqa: N806
+ iterInit = 50 # the initial Max Number of Iterations # noqa: N806
+ algorithmType = 'KrylovNewton' # the algorithm type # noqa: N806
op.test(
testType, tolInit, iterInit
@@ -3024,8 +3024,8 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
algorithmType
) # use Newton solution algorithm: updates tangent stiffness at every iteration
disp = dref * mu
- dU = disp / (1.0 * nSteps)
- print('dref ', dref, 'mu ', mu, 'dU ', dU, 'disp ', disp)
+ dU = disp / (1.0 * nSteps) # noqa: N806
+ print('dref ', dref, 'mu ', mu, 'dU ', dU, 'disp ', disp) # noqa: T201
op.integrator(
'DisplacementControl', ctrlNode, dispDir, dU
) # determine the next time step for an analysis
@@ -3033,7 +3033,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
# Print values
if IOflag >= 1:
- print('singlePush: Push ', ctrlNode, ' to ', mu)
+ print('singlePush: Push ', ctrlNode, ' to ', mu) # noqa: T201
# the initial values to start the while loop
ok = 0
@@ -3042,17 +3042,17 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
# A check that stops the analysis when the load factor turns negative has been included.
# This has been adapted from a similar script by Prof. Garbaggio
htot = op.nodeCoord(ctrlNode, 2)
- maxDriftPiso = 0.0
- VBasal_v = []
- DriftTecho_v = []
+ maxDriftPiso = 0.0 # noqa: N806
+ VBasal_v = [] # noqa: N806
+ DriftTecho_v = [] # noqa: N806
while step <= nSteps and ok == 0 and loadf > 0:
# self.ui.progressBar.setValue(100 * step / nSteps)
ok = op.analyze(1)
loadf = op.getTime()
temp = op.nodeDisp(ctrlNode, dispDir)
# Print the current displacement
- if IOflag >= 2:
- print(
+ if IOflag >= 2: # noqa: PLR2004
+ print( # noqa: T201
'Pushed ',
ctrlNode,
' in ',
@@ -3068,7 +3068,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
# If the analysis fails, try the following changes to achieve convergence
# Analysis will be slower in here though...
if ok != 0:
- print('Trying relaxed convergence..')
+ print('Trying relaxed convergence..') # noqa: T201
op.test(
testType, tolInit * 0.01, iterInit * 50
) # determine if convergence has been achieved at the end of an iteration step
@@ -3077,7 +3077,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
testType, tolInit, iterInit
) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
- print('Trying Newton with initial then current .')
+ print('Trying Newton with initial then current .') # noqa: T201
op.test(
testType, tolInit * 0.01, iterInit * 50
) # determine if convergence has been achieved at the end of an iteration step
@@ -3088,7 +3088,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
testType, tolInit, iterInit
) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
- print('Trying ModifiedNewton with initial ..')
+ print('Trying ModifiedNewton with initial ..') # noqa: T201
op.test(
testType, tolInit * 0.01, iterInit * 50
) # determine if convergence has been achieved at the end of an iteration step
@@ -3099,7 +3099,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
testType, tolInit, iterInit
) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
- print('Trying KrylovNewton ..')
+ print('Trying KrylovNewton ..') # noqa: T201
op.test(
testType, tolInit * 0.01, iterInit * 50
) # determine if convergence has been achieved at the end of an iteration step
@@ -3110,7 +3110,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
testType, tolInit, iterInit
) # determine if convergence has been achieved at the end of an iteration step
if ok != 0:
- print('Perform a Hail Mary ....')
+ print('Perform a Hail Mary ....') # noqa: T201
op.test(
'FixedNumIter', iterInit
) # determine if convergence has been achieved at the end of an iteration step
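
Editor's note: every rung of the fallback ladder above repeats one pattern: relax the test, swap the algorithm, attempt a single step, then restore the defaults. A condensed sketch of that pattern (the fallback list is illustrative, not the function's exact sequence):

    import openseespy.opensees as op

    def retry_one_step(test_type, tol, iters):
        """Attempt one analysis step, falling back through progressively
        more forgiving algorithm/test settings. Sketch only."""
        ok = op.analyze(1)
        for alg in (('Newton', '-initial'), ('ModifiedNewton', '-initial'),
                    ('KrylovNewton',)):
            if ok == 0:
                break
            op.test(test_type, tol * 0.01, iters * 50)  # relaxed convergence test
            op.algorithm(*alg)
            ok = op.analyze(1)
            op.algorithm('KrylovNewton')                # restore the defaults
            op.test(test_type, tol, iters)
        return ok
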
@@ -3120,8 +3120,8 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
ListNodesDrift[:-1, 0], ListNodesDrift[1:, 0]
):
# print('nod_ini ', nod_ini, 'nod_end', nod_end)
- nod_ini = int(nod_ini)
- nod_end = int(nod_end)
+ nod_ini = int(nod_ini) # noqa: PLW2901
+ nod_end = int(nod_end) # noqa: PLW2901
pos_i = op.nodeCoord(nod_ini, 2)
pos_s = op.nodeCoord(nod_end, 2)
hpiso = pos_s - pos_i
@@ -3129,27 +3129,27 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
desp_s = op.nodeDisp(nod_end, 1)
desp_piso = abs(desp_s - desp_i)
drift_piso = desp_piso / hpiso
- maxDriftPiso = max(maxDriftPiso, drift_piso)
+ maxDriftPiso = max(maxDriftPiso, drift_piso) # noqa: N806
- VBasal = 0.0
+ VBasal = 0.0 # noqa: N806
op.reactions()
for node in ListNodesBasal:
# print('ind Basal ', node[0])
- VBasal = VBasal + op.nodeReaction(node[0], 1)
- VBasal_v = np.append(VBasal_v, VBasal)
- DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot
- DriftTecho_v = np.append(DriftTecho_v, DriftTecho)
+ VBasal = VBasal + op.nodeReaction(node[0], 1) # noqa: N806
+ VBasal_v = np.append(VBasal_v, VBasal) # noqa: N806
+ DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot # noqa: N806
+ DriftTecho_v = np.append(DriftTecho_v, DriftTecho) # noqa: N806
loadf = op.getTime()
step += 1
- maxDriftTecho = dU * step / htot
- maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot
+ maxDriftTecho = dU * step / htot # noqa: N806
+ maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot # noqa: N806
if ok != 0:
- print('DispControl Analysis FAILED')
+ print('DispControl Analysis FAILED') # noqa: T201
else:
- print('DispControl Analysis SUCCESSFUL')
+ print('DispControl Analysis SUCCESSFUL') # noqa: T201
if loadf <= 0:
- print('Stopped because of Load factor below zero: ', loadf)
+ print('Stopped because of Load factor below zero: ', loadf) # noqa: T201
# if PrintFlag == 0:
# os.remove("singlePush.txt")
# print singlePush.txt
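
Editor's note: the drift bookkeeping inside the loop reduces to one operation per story: relative displacement of consecutive control nodes divided by the story height. A distilled sketch reading the current OpenSeesPy domain state:

    import openseespy.opensees as op

    def story_drifts(node_pairs):
        """Interstory drift ratio for each (lower, upper) node pair."""
        drifts = []
        for nod_i, nod_s in node_pairs:
            h = op.nodeCoord(nod_s, 2) - op.nodeCoord(nod_i, 2)     # story height
            d = abs(op.nodeDisp(nod_s, 1) - op.nodeDisp(nod_i, 1))  # relative displacement
            drifts.append(d / h)
        return drifts
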
@@ -3162,7 +3162,7 @@ def singlePush1(dref, mu, ctrlNode, dispDir, nSteps):
)
# Pushover function varying tests and algorithms
- def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
+ def singlePush(dref, mu, ctrlNode, dispDir, nSteps): # noqa: C901, N802, N803
# --------------------------------------------------
# Description of Parameters
# --------------------------------------------------
@@ -3196,13 +3196,13 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
# test = {1:'NormDispIncr', 2: 'RelativeEnergyIncr', 3:'EnergyIncr'}
# alg = {1:'KrylovNewton', 2:'ModifiedNewton'}
- IOflag = 2
- PrintFlag = 0
- testType = 'RelativeNormDispIncr' # Don't use with Penalty constraints
+ IOflag = 2 # noqa: N806
+ PrintFlag = 0 # noqa: N806, F841
+ testType = 'RelativeNormDispIncr' # Don't use with Penalty constraints # noqa: N806
- tolInit = 1.0e-7 # the initial Tolerance, so it can be referred back to
- iterInit = 50 # the initial Max Number of Iterations
- algorithmType = 'KrylovNewton' # the algorithm type
+ tolInit = 1.0e-7 # the initial Tolerance, so it can be referred back to # noqa: N806
+ iterInit = 50 # the initial Max Number of Iterations # noqa: N806
+ algorithmType = 'KrylovNewton' # the algorithm type # noqa: N806
# algorithmType Newton; # the algorithm type
# algorithmType Newton; # the algorithm type
@@ -3217,8 +3217,8 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
algorithmType
) # use Newton solution algorithm: updates tangent stiffness at every iteration
disp = dref * mu
- dU = disp / (1.0 * nSteps)
- print(
+ dU = disp / (1.0 * nSteps) # noqa: N806
+ print( # noqa: T201
'dref ', dref, 'mu ', mu, 'dU ', dU, 'disp ', disp, 'nSteps ', nSteps
)
op.integrator(
@@ -3228,7 +3228,7 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
# Print values
if IOflag >= 1:
- print('singlePush: Push ', ctrlNode, ' to ', mu)
+ print('singlePush: Push ', ctrlNode, ' to ', mu) # noqa: T201
# the initial values to start the while loop
ok = 0
@@ -3236,10 +3236,10 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
loadf = 1.0
# This feature of disabling the possibility of having a negative loading has been included.
# This has been adapted from a similar script by Prof. Garbaggio
- maxDriftPiso = 0.0
+ maxDriftPiso = 0.0 # noqa: N806
htot = op.nodeCoord(ctrlNode, 2)
- VBasal_v = []
- DriftTecho_v = []
+ VBasal_v = [] # noqa: N806
+ DriftTecho_v = [] # noqa: N806
# factor_v = np.array([1,0.75,0.5,0.25,0.1,2,3,5,10])
# fact_v = np.array([50,100,500])
# factor = 100
@@ -3249,8 +3249,8 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
ok = op.analyze(1)
loadf = op.getTime()
temp = op.nodeDisp(ctrlNode, dispDir)
- if IOflag >= 2:
- print(
+ if IOflag >= 2: # noqa: PLR2004
+ print( # noqa: T201
'Pushed ',
ctrlNode,
' in ',
@@ -3268,14 +3268,14 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
for j in alg:
for i in test:
for fact in [1, 20, 50]:
- if ok != 0 and j >= 4 and i != 7:
+ if ok != 0 and j >= 4 and i != 7: # noqa: PLR2004
# print('Trying ',str(alg[j]))
op.test(test[i], tolInit * 0.01, iterInit * fact)
op.algorithm(alg[j])
ok = op.analyze(1)
op.algorithm(algorithmType)
op.test(testType, tolInit, iterInit)
- elif ok != 0 and j < 4 and i != 7:
+ elif ok != 0 and j < 4 and i != 7: # noqa: PLR2004
# print('Trying ',str(alg[j]))
op.test(test[i], tolInit, iterInit * fact)
op.algorithm(alg[j], '-initial')
@@ -3284,7 +3284,7 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
op.test(testType, tolInit, iterInit)
if ok == 0:
break
- if ok != 0 and i == 7:
+ if ok != 0 and i == 7: # noqa: PLR2004
op.test(test[i], iterInit)
op.algorithm(alg[j])
ok = op.analyze(1)
@@ -3302,8 +3302,8 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
ListNodesDrift[:-1, 0], ListNodesDrift[1:, 0]
):
# print('nod_ini ', nod_ini, 'nod_end', nod_end)
- nod_ini = int(nod_ini)
- nod_end = int(nod_end)
+ nod_ini = int(nod_ini) # noqa: PLW2901
+ nod_end = int(nod_end) # noqa: PLW2901
pos_i = op.nodeCoord(nod_ini, 2)
pos_s = op.nodeCoord(nod_end, 2)
hpiso = pos_s - pos_i
@@ -3311,27 +3311,27 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
desp_s = op.nodeDisp(nod_end, 1)
desp_piso = abs(desp_s - desp_i)
drift_piso = desp_piso / hpiso
- maxDriftPiso = max(maxDriftPiso, drift_piso)
+ maxDriftPiso = max(maxDriftPiso, drift_piso) # noqa: N806
- VBasal = 0.0
+ VBasal = 0.0 # noqa: N806
op.reactions()
for node in ListNodesBasal:
# print('ind Basal ', node[0])
- VBasal = VBasal + op.nodeReaction(node[0], 1)
- VBasal_v = np.append(VBasal_v, VBasal)
- DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot
- DriftTecho_v = np.append(DriftTecho_v, DriftTecho)
+ VBasal = VBasal + op.nodeReaction(node[0], 1) # noqa: N806
+ VBasal_v = np.append(VBasal_v, VBasal) # noqa: N806
+ DriftTecho = op.nodeDisp(ctrlNode, dispDir) / htot # noqa: N806
+ DriftTecho_v = np.append(DriftTecho_v, DriftTecho) # noqa: N806
loadf = op.getTime()
step += 1
- maxDriftTecho = dU * step / htot
- maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot
+ maxDriftTecho = dU * step / htot # noqa: N806
+ maxDriftTecho2 = op.nodeDisp(ctrlNode, dispDir) / htot # noqa: N806
if ok != 0:
- print('DispControl Analysis FAILED')
+ print('DispControl Analysis FAILED') # noqa: T201
else:
- print('DispControl Analysis SUCCESSFUL')
+ print('DispControl Analysis SUCCESSFUL') # noqa: T201
if loadf <= 0:
- print('Stopped because of Load factor below zero: ', loadf)
+ print('Stopped because of Load factor below zero: ', loadf) # noqa: T201
# if PrintFlag == 0:
# os.remove("singlePush.txt")
# print singlePush.txt
@@ -3343,54 +3343,54 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
DriftTecho_v,
)
- ListNodesDrift = ListNodes[np.where(ListNodes[:, 1] == 0.0)]
- ListNodesBasal = ListNodes[np.where(ListNodes[:, 2] == 0.0)]
- if T1m <= 0.5:
+ ListNodesDrift = ListNodes[np.where(ListNodes[:, 1] == 0.0)] # noqa: N806
+ ListNodesBasal = ListNodes[np.where(ListNodes[:, 2] == 0.0)] # noqa: N806
+ if T1m <= 0.5: # noqa: PLR2004
k = 1.0
- elif T1m <= 2.5:
+ elif T1m <= 2.5: # noqa: PLR2004
k = 0.75 + 0.5 * T1m
else:
k = 2.0
- sumH = np.sum(np.power(Loc_heigth, k))
+ sumH = np.sum(np.power(Loc_heigth, k)) # noqa: N806
floors_num = len(Loc_heigth)
# Match default example
- triangForceDist = True
+ triangForceDist = True # noqa: N806
# Defining the pushover lateral distribution type
- if triangForceDist == True:
- Fp = np.power(Loc_heigth, k) / sumH
+ if triangForceDist == True: # noqa: E712
+ Fp = np.power(Loc_heigth, k) / sumH # noqa: N806
else:
- Fp = 1.0 / floors_num * np.ones(floors_num + 1)
- print('Fp =', Fp)
+ Fp = 1.0 / floors_num * np.ones(floors_num + 1) # noqa: N806
+ print('Fp =', Fp) # noqa: T201
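
Editor's note: the exponent k and the h**k shape follow the ASCE 7-style equivalent-lateral-force vertical distribution (k = 1 for T <= 0.5 s, k = 2 for T > 2.5 s, linear in between). A self-contained sketch with illustrative story heights:

    import numpy as np

    def lateral_force_pattern(heights, T1):
        """Pushover force at each floor, proportional to h**k and
        normalized to a unit base shear. Sketch only."""
        if T1 <= 0.5:
            k = 1.0
        elif T1 <= 2.5:
            k = 0.75 + 0.5 * T1
        else:
            k = 2.0
        hk = np.power(np.asarray(heights, dtype=float), k)
        return hk / hk.sum()

    Fp = lateral_force_pattern([3.0, 6.0, 9.0, 12.0], T1=0.8)  # 4 stories of 3 m (illustrative)
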
op.loadConst('-time', 0.0)
op.timeSeries('Linear', 2)
op.pattern('Plain', 2, 1)
- for node, fp, ind in zip(ListNodesDrift, Fp, range(floors_num)):
+ for node, fp, ind in zip(ListNodesDrift, Fp, range(floors_num)): # noqa: B007
op.load(int(node[0]), fp, 0.0, 0.0)
- Htotal = Loc_heigth[-1]
+ Htotal = Loc_heigth[-1] # noqa: N806
# Der_obj = float(self.ui.Der_obj.text())
- Der_obj = 0.04 # Match default example
- Des_obj = Der_obj * Htotal # Target displacement
+ Der_obj = 0.04 # Match default example # noqa: N806
+ Des_obj = Der_obj * Htotal # Target displacement # noqa: N806
# nSteps = int(self.ui.nSteps.text())
- nSteps = 110 # Match default example
+ nSteps = 110 # Match default example # noqa: N806
dref = Des_obj / nSteps
mu = nSteps
# Node where displacement is read
- IDctrlNode = int(ListNodesDrift[-1, 0])
- print('IDctrlNode =', IDctrlNode)
- IDctrlDOF = 1 # DOF x=1, y=2
- Tol = 1.0e-4 # Tolerance
-
- runFastPushover = True
- if runFastPushover == True:
- maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v = (
+ IDctrlNode = int(ListNodesDrift[-1, 0]) # noqa: N806
+ print('IDctrlNode =', IDctrlNode) # noqa: T201
+ IDctrlDOF = 1 # DOF x=1, y=2 # noqa: N806
+ Tol = 1.0e-4 # Tolerance # noqa: N806, F841
+
+ runFastPushover = True # noqa: N806
+ if runFastPushover == True: # noqa: E712
+ maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v = ( # noqa: N806
singlePush1(dref, mu, IDctrlNode, IDctrlDOF, nSteps)
)
else:
- maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v = (
+ maxDriftPiso, maxDriftTecho, maxDriftTecho2, VBasal_v, DriftTecho_v = ( # noqa: N806
singlePush(dref, mu, IDctrlNode, IDctrlDOF, nSteps)
)
@@ -3405,20 +3405,20 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
cols_def_1 = np.loadtxt('Pushover/cols_def_1.out')
cols_force_6 = np.loadtxt('Pushover/cols_force_6.out')
cols_def_6 = np.loadtxt('Pushover/cols_def_6.out')
- print('cols_def_1', cols_def_1)
+ print('cols_def_1', cols_def_1) # noqa: T201
# fy = float(self.ui.fy.text()) * MPa
fy = float(rootSIM['FySteel']) * MPa
- print('Fy', fy)
+ print('Fy', fy) # noqa: T201
- Es = 200.0 * GPa
+ Es = 200.0 * GPa # noqa: N806
ey = fy / Es
num_beams = len(EleBeam)
num_cols = len(EleCol)
- CD_Beams = np.zeros([num_beams, 2])
+ CD_Beams = np.zeros([num_beams, 2]) # noqa: N806
# Calculation of curvature ductility of beams and columns
- for ind, DB in zip(range(1, num_beams + 1), DataBeamDesing):
+ for ind, DB in zip(range(1, num_beams + 1), DataBeamDesing): # noqa: N806
ets_beam_1 = beams_def_1[:, 2 * ind - 1] + beams_def_1[:, 2 * ind] * (
DB.dt1 - DB.h / 2
)
@@ -3433,32 +3433,32 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
)
es_beam_1 = np.maximum(np.absolute(ets_beam_1), np.absolute(ebs_beam_1))
es_beam_6 = np.maximum(np.absolute(ets_beam_6), np.absolute(ebs_beam_6))
- print('es_beam_1', es_beam_1, 'es_beam_6', es_beam_6)
+ print('es_beam_1', es_beam_1, 'es_beam_6', es_beam_6) # noqa: T201
if np.max(es_beam_1) <= ey:
- CD_1 = 0
+ CD_1 = 0 # noqa: N806
else:
fi_1 = np.absolute(beams_def_1[:, 2 * ind])
- M_beam_1 = np.absolute(beams_force_1[:, 2 * ind])
+ M_beam_1 = np.absolute(beams_force_1[:, 2 * ind]) # noqa: N806
f = interpolate.interp1d(es_beam_1, M_beam_1)
- My_1 = f(ey)
+ My_1 = f(ey) # noqa: N806
f = interpolate.interp1d(M_beam_1, fi_1)
fiy_1 = f(My_1)
- CD_1 = fi_1[-1] / fiy_1
+ CD_1 = fi_1[-1] / fiy_1 # noqa: N806
if np.max(es_beam_6) <= ey:
- CD_6 = 0
+ CD_6 = 0 # noqa: N806
else:
fi_6 = np.absolute(beams_def_6[:, 2 * ind])
- M_beam_6 = np.absolute(beams_force_6[:, 2 * ind])
+ M_beam_6 = np.absolute(beams_force_6[:, 2 * ind]) # noqa: N806
f = interpolate.interp1d(es_beam_6, M_beam_6)
- My_6 = f(ey)
+ My_6 = f(ey) # noqa: N806
f = interpolate.interp1d(M_beam_6, fi_6)
fiy_6 = f(My_6)
- CD_6 = fi_6[-1] / fiy_6
+ CD_6 = fi_6[-1] / fiy_6 # noqa: N806
CD_Beams[ind - 1, :] = [CD_1, CD_6]
- print('CD_Beams =', CD_Beams)
+ print('CD_Beams =', CD_Beams) # noqa: T201
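
Editor's note: each ductility value comes from inverting the recorded response twice: interpolate the moment at the yield strain ey, then the curvature at that moment, and divide the final curvature by it. A distilled sketch of the beam case:

    import numpy as np
    from scipy import interpolate

    def curvature_ductility(strain, moment, curvature, ey):
        """Curvature ductility phi_u/phi_y from a section's pushover
        history; 0 if the steel never reaches the yield strain ey."""
        if np.max(strain) <= ey:
            return 0.0
        My = interpolate.interp1d(strain, moment)(ey)      # moment at first yield
        fiy = interpolate.interp1d(moment, curvature)(My)  # curvature at that moment
        return curvature[-1] / fiy
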
- CD_Cols = np.zeros([num_cols, 2])
- for ind, DC in zip(range(1, num_cols + 1), DataColDesing):
+ CD_Cols = np.zeros([num_cols, 2]) # noqa: N806
+ for ind, DC in zip(range(1, num_cols + 1), DataColDesing): # noqa: N806
ets_col_1 = cols_def_1[:, 2 * ind - 1] + cols_def_1[:, 2 * ind] * (
DC.d - DC.h / 2
)
@@ -3473,78 +3473,78 @@ def singlePush(dref, mu, ctrlNode, dispDir, nSteps):
)
es_col_1 = np.maximum(np.absolute(ets_col_1), np.absolute(ebs_col_1))
es_col_6 = np.maximum(np.absolute(ets_col_6), np.absolute(ebs_col_6))
- print('es_col_1', es_col_1, 'es_col_6', es_col_6)
+ print('es_col_1', es_col_1, 'es_col_6', es_col_6) # noqa: T201
if np.max(es_col_1) <= ey:
- CD_1 = 0
+ CD_1 = 0 # noqa: N806
else:
fi_1 = np.absolute(cols_def_1[:, 2 * ind])
- M_col_1 = np.absolute(cols_force_1[:, 2 * ind])
+ M_col_1 = np.absolute(cols_force_1[:, 2 * ind]) # noqa: N806
f = interpolate.interp1d(es_col_1, M_col_1)
- Mfy_1 = f(ey)
+ Mfy_1 = f(ey) # noqa: N806
f = interpolate.interp1d(M_col_1, fi_1)
fify_1 = f(Mfy_1)
- My_1 = np.max(M_col_1)
+ My_1 = np.max(M_col_1) # noqa: N806
fiy_1 = My_1 / Mfy_1 * fify_1
- CD_1 = fi_1[-1] / fiy_1
+ CD_1 = fi_1[-1] / fiy_1 # noqa: N806
if np.max(es_col_6) <= ey:
- CD_6 = 0
+ CD_6 = 0 # noqa: N806
else:
fi_6 = np.absolute(cols_def_6[:, 2 * ind])
- M_col_6 = np.absolute(cols_force_6[:, 2 * ind])
+ M_col_6 = np.absolute(cols_force_6[:, 2 * ind]) # noqa: N806
f = interpolate.interp1d(es_col_6, M_col_6)
- Mfy_6 = f(ey)
+ Mfy_6 = f(ey) # noqa: N806
f = interpolate.interp1d(M_col_6, fi_6)
fify_6 = f(Mfy_6)
- My_6 = np.max(M_col_6)
+ My_6 = np.max(M_col_6) # noqa: N806
fiy_6 = My_6 / Mfy_6 * fify_6
- CD_6 = fi_6[-1] / fiy_6
+ CD_6 = fi_6[-1] / fiy_6 # noqa: N806
CD_Cols[ind - 1, :] = [CD_1, CD_6]
- print('CD_Cols =', CD_Cols)
- CD_Ele = np.concatenate((CD_Cols, CD_Beams), axis=0)
+ print('CD_Cols =', CD_Cols) # noqa: T201
+ CD_Ele = np.concatenate((CD_Cols, CD_Beams), axis=0) # noqa: N806
- Desp_x = np.loadtxt('Pushover/HoriNodes.out')
- Desp_y = np.loadtxt('Pushover/VertNodes.out')
- Nodes_desp_x = ListNodes[:, 1] + 3 * Desp_x[-1, 1:]
- Nodes_desp_y = ListNodes[:, 2] + 3 * Desp_y[-1, 1:]
+ Desp_x = np.loadtxt('Pushover/HoriNodes.out') # noqa: N806
+ Desp_y = np.loadtxt('Pushover/VertNodes.out') # noqa: N806
+ Nodes_desp_x = ListNodes[:, 1] + 3 * Desp_x[-1, 1:] # noqa: N806
+ Nodes_desp_y = ListNodes[:, 2] + 3 * Desp_y[-1, 1:] # noqa: N806
fpos = 0.1
fsize = 1
- DataDC = []
- for Ele in Elements:
+ DataDC = [] # noqa: N806
+ for Ele in Elements: # noqa: N806
xi = Nodes_desp_x[Ele.Nod_ini]
yi = Nodes_desp_y[Ele.Nod_ini]
xe = Nodes_desp_x[Ele.Nod_end]
ye = Nodes_desp_y[Ele.Nod_end]
- x = np.array([xi, xe])
- y = np.array([yi, ye])
- Delta_x = xe - xi
- Delta_y = ye - yi
- xi_CD = xi + fpos * Delta_x
- yi_CD = yi + fpos * Delta_y
- xe_CD = xe - fpos * Delta_x
- ye_CD = ye - fpos * Delta_y
- CD_i = CD_Ele[Ele.EleTag - 1, 0]
- CD_e = CD_Ele[Ele.EleTag - 1, 1]
+ x = np.array([xi, xe]) # noqa: F841
+ y = np.array([yi, ye]) # noqa: F841
+ Delta_x = xe - xi # noqa: N806
+ Delta_y = ye - yi # noqa: N806
+ xi_CD = xi + fpos * Delta_x # noqa: N806
+ yi_CD = yi + fpos * Delta_y # noqa: N806
+ xe_CD = xe - fpos * Delta_x # noqa: N806
+ ye_CD = ye - fpos * Delta_y # noqa: N806
+ CD_i = CD_Ele[Ele.EleTag - 1, 0] # noqa: N806
+ CD_e = CD_Ele[Ele.EleTag - 1, 1] # noqa: N806
DataDC.append(
DuctilityCurve(
xi_CD, xe_CD, yi_CD, ye_CD, fsize * CD_i, fsize * CD_e
)
)
- DC_x, DC_y, DC_size = [], [], []
- for DC in DataDC:
+ DC_x, DC_y, DC_size = [], [], [] # noqa: N806
+ for DC in DataDC: # noqa: N806
DC_x.append([DC.xi, DC.xe])
DC_y.append([DC.yi, DC.ye])
DC_size.append([DC.CD_i, DC.CD_e])
- DC_x = np.array(DC_x)
- DC_x = DC_x.flatten()
- DC_y = np.array(DC_y)
- DC_y = DC_y.flatten()
- DC_size = np.array(DC_size)
- DC_size = DC_size.flatten()
- print('DC_x= ', DC_x)
- print('DC_y= ', DC_y)
- print('DC_size= ', DC_size)
+ DC_x = np.array(DC_x) # noqa: N806
+ DC_x = DC_x.flatten() # noqa: N806
+ DC_y = np.array(DC_y) # noqa: N806
+ DC_y = DC_y.flatten() # noqa: N806
+ DC_size = np.array(DC_size) # noqa: N806
+ DC_size = DC_size.flatten() # noqa: N806
+ print('DC_x= ', DC_x) # noqa: T201
+ print('DC_y= ', DC_y) # noqa: T201
+ print('DC_size= ', DC_size) # noqa: T201
if __name__ == '__main__':
diff --git a/modules/createSAM/customPyInput/CustomPyInput.py b/modules/createSAM/customPyInput/CustomPyInput.py
index 4416dacca..45e0fa2e7 100644
--- a/modules/createSAM/customPyInput/CustomPyInput.py
+++ b/modules/createSAM/customPyInput/CustomPyInput.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -42,30 +42,30 @@
import sys
-def create_SAM(
- AIM_file,
- EVENT_file,
- SAM_file,
+def create_SAM( # noqa: N802, D103
+ AIM_file, # noqa: N803
+ EVENT_file, # noqa: ARG001, N803
+ SAM_file, # noqa: N803
model_script,
model_path,
ndm,
dof_map,
column_line,
- getRV,
+ getRV, # noqa: ARG001, N803
):
# KZ: modifying BIM to AIM
- with open(AIM_file, encoding='utf-8') as f:
- root_AIM = json.load(f)
- root_GI = root_AIM['GeneralInformation']
+ with open(AIM_file, encoding='utf-8') as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
+ root_GI = root_AIM['GeneralInformation'] # noqa: N806
try:
stories = int(root_GI['NumberOfStories'])
- except:
- raise ValueError('number of stories information missing')
+ except: # noqa: E722
+ raise ValueError('number of stories information missing') # noqa: B904, EM101, TRY003
if column_line is None:
# KZ: looking into SAM
- root_SAM = root_AIM.get('Modeling', {})
+ root_SAM = root_AIM.get('Modeling', {}) # noqa: N806
nodes = root_SAM.get('centroidNodes', [])
if len(nodes) == 0:
nodes = list(range(stories + 1))
@@ -82,7 +82,7 @@ def create_SAM(
node_entry['floor'] = f'{floor}'
node_map.append(node_entry)
- root_SAM = {
+ root_SAM = { # noqa: N806
'mainScript': model_script,
'modelPath': model_path,
'dofMap': dof_map,
@@ -92,19 +92,19 @@ def create_SAM(
'numStory': stories,
# KZ: correcting the ndm format --> this causing standardEarthquakeEDP failure...
'ndm': int(ndm),
- # TODO: improve this if we want random vars in the structure
+ # TODO: improve this if we want random vars in the structure # noqa: TD002
'randomVar': [],
}
# pass all other attributes in the AIM GI to SAM
- for cur_key in root_GI.keys():
+ for cur_key in root_GI.keys(): # noqa: SIM118
cur_item = root_GI.get(cur_key, None)
- if cur_key in root_SAM.keys():
+ if cur_key in root_SAM.keys(): # noqa: SIM118
pass
else:
root_SAM[cur_key] = cur_item
- with open(SAM_file, 'w', encoding='utf-8') as f:
+ with open(SAM_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(root_SAM, f, indent=2)
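
Editor's note: for orientation, the file written here is a small JSON document; a hypothetical instance for a 2-story model, limited to the keys visible above (all values illustrative):

    sam_example = {
        'mainScript': 'model.py',   # assumed script name
        'modelPath': '',
        'dofMap': '1, 2, 3',
        'NodeMapping': [
            {'node': 0, 'floor': '0'},
            {'node': 1, 'floor': '1'},
            {'node': 2, 'floor': '2'},
        ],
        'numStory': 2,
        'ndm': 2,
        'randomVar': [],
    }
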
diff --git a/modules/createSAM/openSeesPyInput/OpenSeesPyInput.py b/modules/createSAM/openSeesPyInput/OpenSeesPyInput.py
index b2dcb62b7..44eb8d89e 100644
--- a/modules/createSAM/openSeesPyInput/OpenSeesPyInput.py
+++ b/modules/createSAM/openSeesPyInput/OpenSeesPyInput.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -42,24 +42,24 @@
import sys
-def create_SAM(
- BIM_file,
- EVENT_file,
- SAM_file,
+def create_SAM( # noqa: N802, D103
+ BIM_file, # noqa: N803
+ EVENT_file, # noqa: ARG001, N803
+ SAM_file, # noqa: N803
model_script,
model_path,
ndm,
dof_map,
column_line,
- getRV,
+ getRV, # noqa: ARG001, N803
):
- with open(BIM_file, encoding='utf-8') as f:
- root_BIM = json.load(f)['GeneralInformation']
+ with open(BIM_file, encoding='utf-8') as f: # noqa: PTH123
+ root_BIM = json.load(f)['GeneralInformation'] # noqa: N806
try:
stories = root_BIM['NumberOfStories']
- except:
- raise ValueError('OpenSeesPyInput - structural information missing')
+ except: # noqa: E722
+ raise ValueError('OpenSeesPyInput - structural information missing') # noqa: B904, EM101, TRY003
if column_line is None:
nodes = list(range(stories + 1))
@@ -75,7 +75,7 @@ def create_SAM(
node_entry['floor'] = f'{floor}'
node_map.append(node_entry)
- root_SAM = {
+ root_SAM = { # noqa: N806
'mainScript': model_script,
'modelPath': model_path,
'dofMap': dof_map,
@@ -84,11 +84,11 @@ def create_SAM(
'NodeMapping': node_map,
'numStory': stories,
'ndm': ndm,
- # TODO: improve this if we want random vars in the structure
+ # TODO: improve this if we want random vars in the structure # noqa: TD002
'randomVar': [],
}
- with open(SAM_file, 'w', encoding='utf-8') as f:
+ with open(SAM_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(root_SAM, f, indent=2)
diff --git a/modules/createSAM/surrogateGP/SurrogateGP.py b/modules/createSAM/surrogateGP/SurrogateGP.py
index 5f30bade3..a1e6f239f 100644
--- a/modules/createSAM/surrogateGP/SurrogateGP.py
+++ b/modules/createSAM/surrogateGP/SurrogateGP.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -48,53 +48,53 @@
import sys
-def create_SAM(AIM_file, SAM_file):
+def create_SAM(AIM_file, SAM_file): # noqa: N802, N803, D103
#
# Find SAM.json info from surrogate model file
#
# load AIM
- with open(AIM_file) as f:
- root_AIM = json.load(f)
+ with open(AIM_file) as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
- print('General Information tab is ignored')
- root_SAM = root_AIM['Applications']['Modeling']
+ print('General Information tab is ignored') # noqa: T201
+ root_SAM = root_AIM['Applications']['Modeling'] # noqa: N806
# find and load surrogate json
# surrogate_path = os.path.join(root_SAM['ApplicationData']['MS_Path'],root_SAM['ApplicationData']['mainScript'])
- surrogate_path = os.path.join(
- os.getcwd(),
+ surrogate_path = os.path.join( # noqa: PTH118
+ os.getcwd(), # noqa: PTH109
root_SAM['ApplicationData']['mainScript'],
)
- print(surrogate_path)
+ print(surrogate_path) # noqa: T201
- with open(surrogate_path) as f:
+ with open(surrogate_path) as f: # noqa: PTH123
surrogate_model = json.load(f)
# find SAM in surrogate json
- root_SAM = surrogate_model['SAM']
+ root_SAM = surrogate_model['SAM'] # noqa: N806
# sanity check
if root_AIM['Applications']['EDP']['Application'] != 'SurrogateEDP':
- with open('../workflow.err', 'w') as f:
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write('Please select [None] in the EDP tab.')
- exit(-1)
+ exit(-1) # noqa: PLR1722
if (
root_AIM['Applications']['Simulation']['Application']
!= 'SurrogateSimulation'
):
- with open('../workflow.err', 'w') as f:
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write('Please select [None] in the FEM tab.')
- exit(-1)
+ exit(-1) # noqa: PLR1722
# write SAM.json
- with open(SAM_file, 'w') as f:
+ with open(SAM_file, 'w') as f: # noqa: PTH123
json.dump(root_SAM, f, indent=2)
diff --git a/modules/performDL/CBCities/CBCitiesMethods.py b/modules/performDL/CBCities/CBCitiesMethods.py
index 90af4e6f0..53a3faa6c 100644
--- a/modules/performDL/CBCities/CBCitiesMethods.py
+++ b/modules/performDL/CBCities/CBCitiesMethods.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -47,20 +47,20 @@
from scipy.spatial import cKDTree
-def ckdnearest(gdfA, gdfB, gdfB_cols=['pgv']):
- A = np.concatenate([np.array(geom.coords) for geom in gdfA.geometry.to_list()])
- B = [np.array(geom.coords) for geom in gdfB.geometry.to_list()]
- B_ix = tuple(
+def ckdnearest(gdfA, gdfB, gdfB_cols=['pgv']): # noqa: B006, N803, D103
+ A = np.concatenate([np.array(geom.coords) for geom in gdfA.geometry.to_list()]) # noqa: N806
+ B = [np.array(geom.coords) for geom in gdfB.geometry.to_list()] # noqa: N806
+ B_ix = tuple( # noqa: N806
itertools.chain.from_iterable(
[itertools.repeat(i, x) for i, x in enumerate(list(map(len, B)))]
)
)
- B = np.concatenate(B)
+ B = np.concatenate(B) # noqa: N806
ckd_tree = cKDTree(B)
dist, idx = ckd_tree.query(A, k=1)
idx = itemgetter(*idx)(B_ix)
gdf = pd.concat([gdfA, gdfB.loc[idx, gdfB_cols].reset_index(drop=True)], axis=1)
- return gdf
+ return gdf # noqa: RET504
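
Editor's note: ckdnearest concatenates the coordinates of gdfA's geometries, builds a SciPy cKDTree over gdfB's points, and copies the requested columns from each nearest B feature onto A. A hypothetical usage with two GeoDataFrames (file names and layers assumed):

    import geopandas as gpd

    pipes = gpd.read_file('pipes.geojson')      # assumed pipe network layer
    im_grid = gpd.read_file('im_grid.geojson')  # assumed points carrying a 'pgv' column

    pipes_with_pgv = ckdnearest(pipes, im_grid, gdfB_cols=['pgv'])
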
# def pgv_node2pipe(pipe_info,node_info):
@@ -73,7 +73,7 @@ def ckdnearest(gdfA, gdfB, gdfB_cols=['pgv']):
# return pgvs
-def pgv_node2pipe(pipe_info, node_info):
+def pgv_node2pipe(pipe_info, node_info): # noqa: D103
res = []
node_ids = np.array(node_info['node_id'])
@@ -88,14 +88,14 @@ def pgv_node2pipe(pipe_info, node_info):
return res
-def get_prefix(file_path):
+def get_prefix(file_path): # noqa: D103
file_name = file_path.split('/')[-1]
prefix = file_name.split('.')[0]
- return prefix
+ return prefix # noqa: RET504
# Get the PGV value for the pipe
-def add_pgv2pipe(pipe):
+def add_pgv2pipe(pipe): # noqa: D103
reg_event = pipe['RegionalEvent']
events = pipe['Events'][0]
@@ -107,27 +107,27 @@ def add_pgv2pipe(pipe):
pgvs = np.array([])
- for eventFile, scaleFactor in event_array:
+ for eventFile, scaleFactor in event_array: # noqa: N806
# Discard the numbering at the end of the csv file name
- eventFile = eventFile[: len(eventFile) - 8]
+ eventFile = eventFile[: len(eventFile) - 8] # noqa: N806, PLW2901
# Get the path to the event file
- path_Event_File = posixpath.join(event_folder_path, eventFile)
+ path_Event_File = posixpath.join(event_folder_path, eventFile) # noqa: N806
# Read in the event file IM List
- eventIMList = pd.read_csv(path_Event_File, header=0)
+ eventIMList = pd.read_csv(path_Event_File, header=0) # noqa: N806
- PGVCol = eventIMList.loc[:, 'PGV']
+ PGVCol = eventIMList.loc[:, 'PGV'] # noqa: N806
pgv_unit = event_units['PGV']
# Scale the PGVs and account for units - fragility functions are in inch per second
if pgv_unit == 'cmps':
- PGVCol = PGVCol.apply(lambda x: cm2inch(x) * scaleFactor)
+ PGVCol = PGVCol.apply(lambda x: cm2inch(x) * scaleFactor) # noqa: B023, N806
elif pgv_unit == 'inps':
continue
else:
- print("Error, only 'cmps' and 'inps' units are supported for PGV")
+ print("Error, only 'cmps' and 'inps' units are supported for PGV") # noqa: T201
pgvs = np.append(pgvs, PGVCol.values)
@@ -163,28 +163,28 @@ def add_pgv2pipe(pipe):
}
-def cm2inch(cm):
+def cm2inch(cm): # noqa: D103
return 39.3701 * cm / 100
-def calculate_fail_repairrate(k, pgv, l):
+def calculate_fail_repairrate(k, pgv, l): # noqa: E741, D103
rr = k * 0.00187 * pgv / 1000
failure_rate = 1 - np.power(np.e, -rr * l)
- return failure_rate
+ return failure_rate # noqa: RET504
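
Editor's note: the rate expression matches the American Lifelines Alliance (2001) fragility for buried pipe, RR = K1 * 0.00187 * PGV in repairs per 1000 ft with PGV in in/s; treating breaks as a Poisson process over the length l then gives P(fail) = 1 - exp(-rr * l). A worked numeric check:

    import numpy as np

    k, pgv, l = 1.0, 30.0, 500.0   # material factor, PGV (in/s), length (ft) - illustrative
    rr = k * 0.00187 * pgv / 1000  # ~5.6e-5 repairs per ft
    p_fail = 1 - np.power(np.e, -rr * l)
    print(round(p_fail, 4))        # ~0.0277 for this 500 ft segment
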
-def get_pipe_failrate(pipe):
- pipe_GI = pipe['GeneralInformation']
+def get_pipe_failrate(pipe): # noqa: D103
+ pipe_GI = pipe['GeneralInformation'] # noqa: N806
- m, l, pgv = pipe_GI['material'], pipe_GI['length'], pipe['pgv']
+ m, l, pgv = pipe_GI['material'], pipe_GI['length'], pipe['pgv'] # noqa: E741
- pipeRR = calculate_fail_repairrate(k_dict[m], l, pgv)
+ pipeRR = calculate_fail_repairrate(k_dict[m], l, pgv) # noqa: N806
- return pipeRR
+ return pipeRR # noqa: RET504
-def add_failrate2pipe(pipe):
+def add_failrate2pipe(pipe): # noqa: D103
pipe = add_pgv2pipe(pipe)
pipe['fail_prob'] = get_pipe_failrate(pipe)
@@ -209,21 +209,21 @@ def add_failrate2pipe(pipe):
# print (f'saved to {save_path}')
-def get_bar_ranges(space):
+def get_bar_ranges(space): # noqa: D103
ranges = []
for i in range(1, len(space)):
- ranges.append((space[i - 1], space[i]))
+ ranges.append((space[i - 1], space[i])) # noqa: PERF401
return ranges
-def get_failure_groups(fail_probs, min_thre=1e-3, num_groups=10):
+def get_failure_groups(fail_probs, min_thre=1e-3, num_groups=10): # noqa: D103
valid_fails = [fail_prob for fail_prob in fail_probs if fail_prob > min_thre]
count, space = np.histogram(valid_fails, num_groups)
ranges = get_bar_ranges(space)
- return ranges
+ return ranges # noqa: RET504
-def get_failed_pipes_mask(pipe_info, groups):
+def get_failed_pipes_mask(pipe_info, groups): # noqa: D103
broken_pipes = np.zeros(len(pipe_info))
for r in groups:
@@ -241,12 +241,12 @@ def get_failed_pipes_mask(pipe_info, groups):
return broken_pipes
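
Editor's note: together, get_failure_groups and get_failed_pipes_mask perform a histogram-stratified draw over the failure probabilities. A simplified per-pipe Bernoulli sketch of the same idea (not the module's exact stratified scheme):

    import numpy as np

    rng = np.random.default_rng(0)  # seeded for repeatability (illustrative)

    def sample_failed_pipes(fail_probs, min_thre=1e-3):
        """Boolean mask of failed pipes; probabilities below min_thre are ignored."""
        p = np.asarray(fail_probs, dtype=float)
        return (p > min_thre) & (rng.random(p.shape) < p)
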
-def generate_leak_diameter(pipe_diam, min_ratio=0.05, max_ratio=0.25):
+def generate_leak_diameter(pipe_diam, min_ratio=0.05, max_ratio=0.25): # noqa: D103
r = np.random.uniform(min_ratio, max_ratio)
return pipe_diam * r
-def get_leak_sizes(pipe_info):
+def get_leak_sizes(pipe_info): # noqa: D103
leak_size = np.zeros(len(pipe_info))
for index, row in pipe_info.iterrows():
d, repair = row['diameter'], row['repair']
@@ -256,11 +256,11 @@ def get_leak_sizes(pipe_info):
return leak_size
-def fail_pipes_number(pipe_info):
+def fail_pipes_number(pipe_info): # noqa: D103
fail_probs = np.array(pipe_info['fail_prob'])
groups = get_failure_groups(fail_probs)
failed_pipes_mask = get_failed_pipes_mask(pipe_info, groups)
num_failed_pipes = sum(failed_pipes_mask)
- print(f'number of failed pipes is: {num_failed_pipes}')
+ print(f'number of failed pipes is: {num_failed_pipes}') # noqa: T201
return num_failed_pipes
diff --git a/modules/performDL/CBCities/CBCitiesPipeDamageAssessment.py b/modules/performDL/CBCities/CBCitiesPipeDamageAssessment.py
index 112b89777..f5dd024df 100644
--- a/modules/performDL/CBCities/CBCitiesPipeDamageAssessment.py
+++ b/modules/performDL/CBCities/CBCitiesPipeDamageAssessment.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -44,27 +44,27 @@
import numpy as np
import pandas as pd
-from CBCitiesMethods import *
+from CBCitiesMethods import * # noqa: F403
-def main(node_info, pipe_info):
+def main(node_info, pipe_info): # noqa: D103
# Load Data
- print('Loading the node json file...')
+ print('Loading the node json file...') # noqa: T201
- with open(node_info) as f:
- node_data = json.load(f)
+ with open(node_info) as f: # noqa: PTH123
+ node_data = json.load(f) # noqa: F841
- with open(pipe_info) as f:
+ with open(pipe_info) as f: # noqa: PTH123
pipe_data = json.load(f)
min_id = int(pipe_data[0]['id'])
max_id = int(pipe_data[0]['id'])
- allPipes = []
+ allPipes = [] # noqa: N806
for pipe in pipe_data:
- AIM_file = pipe['file']
+ AIM_file = pipe['file'] # noqa: N806
asst_id = pipe['id']
@@ -72,8 +72,8 @@ def main(node_info, pipe_info):
max_id = max(int(asst_id), max_id)
# Open the AIM file
- with open(AIM_file) as f:
- pipe = AIM_data = json.load(f)
+ with open(AIM_file) as f: # noqa: PTH123
+ pipe = AIM_data = json.load(f) # noqa: N806, F841, PLW2901
allPipes.append(pipe)
@@ -84,30 +84,30 @@ def main(node_info, pipe_info):
import multiprocessing as mp
pool = mp.Pool(mp.cpu_count() - 1)
- results = pool.map(add_failrate2pipe, [pipe for pipe in allPipes])
+ results = pool.map(add_failrate2pipe, [pipe for pipe in allPipes]) # noqa: C416, F405
pool.close()
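
Editor's note: add_failrate2pipe is mapped across all pipes with one worker per CPU minus one. A minimal self-contained sketch of the same pattern, including the __main__ guard that multiprocessing requires on spawn-based platforms:

    import multiprocessing as mp

    def work(item):  # stand-in for add_failrate2pipe
        return item * 2

    if __name__ == '__main__':
        with mp.Pool(mp.cpu_count() - 1) as pool:
            results = pool.map(work, range(10))
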
- df = pd.DataFrame({'DV': {}, 'MeanFailureProbability': {}})
+ df = pd.DataFrame({'DV': {}, 'MeanFailureProbability': {}}) # noqa: PD901
for pipe in results:
- failureProbArray = pipe['fail_prob']
- avgFailureProb = np.average(failureProbArray)
+ failureProbArray = pipe['fail_prob'] # noqa: N806
+ avgFailureProb = np.average(failureProbArray) # noqa: N806
pipe_id = pipe['GeneralInformation']['AIM_id']
- print('pipe_id: ', pipe_id)
+ print('pipe_id: ', pipe_id) # noqa: T201
# print("failureProbArray: ",failureProbArray)
- print('avgFailureProb: ', avgFailureProb)
+ print('avgFailureProb: ', avgFailureProb) # noqa: T201
df2 = pd.DataFrame(
{'DV': pipe_id, 'MeanFailureProbability': avgFailureProb}, index=[0]
)
- df = pd.concat([df, df2], axis=0)
+ df = pd.concat([df, df2], axis=0) # noqa: PD901
# Get the directory for saving the results, assume it is the same one with the AIM file
- aimDir = os.path.dirname(pipe_info)
- aimFileName = os.path.basename(pipe_info)
+ aimDir = os.path.dirname(pipe_info) # noqa: PTH120, N806
+ aimFileName = os.path.basename(pipe_info) # noqa: PTH119, N806, F841
- saveDir = posixpath.join(aimDir, f'DV_{min_id}-{max_id}.csv')
+ saveDir = posixpath.join(aimDir, f'DV_{min_id}-{max_id}.csv') # noqa: N806
df.to_csv(saveDir, index=False)
@@ -117,7 +117,7 @@ def main(node_info, pipe_info):
if __name__ == '__main__':
# Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
'Run the CB-cities water distribution damage and loss workflow.',
allow_abbrev=False,
)
@@ -133,7 +133,7 @@ def main(node_info, pipe_info):
)
# Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
# update the local app dir with the default - if needed
# if wfArgs.appDir is None:
diff --git a/modules/performDL/CBCities/CBCitiesWDNDL.py b/modules/performDL/CBCities/CBCitiesWDNDL.py
index ae06c7df4..5f7257ec6 100644
--- a/modules/performDL/CBCities/CBCitiesWDNDL.py
+++ b/modules/performDL/CBCities/CBCitiesWDNDL.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -47,39 +47,39 @@
import pandas as pd
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
-from CBCitiesMethods import *
+from CBCitiesMethods import * # noqa: E402, F403
-def run_DL_calc(aim_file_path, saveDir, output_name):
+def run_DL_calc(aim_file_path, saveDir, output_name): # noqa: N802, N803, D103
# Load Data
- print('Loading the pipeline json file...')
+ print('Loading the pipeline json file...') # noqa: T201
# Open the AIM file
- with open(aim_file_path) as f:
- pipe = AIM_data = json.load(f)
+ with open(aim_file_path) as f: # noqa: PTH123
+ pipe = AIM_data = json.load(f) # noqa: N806, F841
- add_failrate2pipe(pipe)
+ add_failrate2pipe(pipe) # noqa: F405
- failureRateArray = pipe['fail_prob']
- avgRr = np.average(failureRateArray)
+ failureRateArray = pipe['fail_prob'] # noqa: N806
+ avgRr = np.average(failureRateArray) # noqa: N806
- df = pd.DataFrame({'DV': '0', 'RepairRate': avgRr}, index=[0])
+ df = pd.DataFrame({'DV': '0', 'RepairRate': avgRr}, index=[0]) # noqa: PD901
- savePath = posixpath.join(saveDir, output_name)
+ savePath = posixpath.join(saveDir, output_name) # noqa: N806
df.to_csv(savePath, index=False)
return 0
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--filenameDL')
parser.add_argument('-p', '--demandFile', default=None)
diff --git a/modules/performDL/pelicun3/DL_visuals.py b/modules/performDL/pelicun3/DL_visuals.py
index d344ced0a..93cc245cb 100644
--- a/modules/performDL/pelicun3/DL_visuals.py
+++ b/modules/performDL/pelicun3/DL_visuals.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2023 Leland Stanford Junior University
# Copyright (c) 2023 The Regents of the University of California
#
@@ -57,11 +57,11 @@
# start_time = time.time()
-def plot_fragility(comp_db_path, output_path, create_zip='0'):
+def plot_fragility(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103
if create_zip == '1':
output_path = output_path[:-4]
- if os.path.exists(output_path):
+ if os.path.exists(output_path): # noqa: PTH110
shutil.rmtree(output_path)
Path(output_path).mkdir(parents=True, exist_ok=True)
@@ -71,7 +71,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
comp_db_meta = comp_db_path[:-3] + 'json'
if Path(comp_db_meta).is_file():
- with open(comp_db_meta) as f:
+ with open(comp_db_meta) as f: # noqa: PTH123
frag_meta = json.load(f)
else:
frag_meta = None
@@ -82,8 +82,8 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
# for comp_id in frag_df.index[695:705]:
for comp_id in frag_df.index:
comp_data = frag_df.loc[comp_id]
- if frag_meta != None:
- if comp_id in frag_meta.keys():
+ if frag_meta != None: # noqa: E711
+ if comp_id in frag_meta.keys(): # noqa: SIM118
comp_meta = frag_meta[comp_id]
else:
comp_meta = None
@@ -121,8 +121,8 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
d_min = np.inf
d_max = -np.inf
- LS_count = 0
- for LS in limit_states:
+ LS_count = 0 # noqa: N806
+ for LS in limit_states: # noqa: N806
if comp_data.loc[(LS, 'Family')] == 'normal':
d_min_i, d_max_i = norm.ppf(
[p_min, p_max],
@@ -141,14 +141,14 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
else:
continue
- LS_count += 1
+ LS_count += 1 # noqa: N806
d_min = np.min([d_min, d_min_i])
d_max = np.max([d_max, d_max_i])
demand_vals = np.linspace(d_min, d_max, num=100)
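
Editor's note: each limit-state curve is just the CDF of the fitted family evaluated over that demand range. A sketch of the two families, following the parameter convention assumed here (Theta_1 acts as a coefficient of variation for the normal and as the log-space dispersion for the lognormal):

    import numpy as np
    from scipy.stats import norm

    def fragility_cdf(demand, family, theta_0, theta_1):
        """CDF of one limit state over an array of demand values. Sketch only."""
        if family == 'normal':
            return norm.cdf(demand, loc=theta_0, scale=theta_1 * theta_0)
        if family == 'lognormal':
            return norm.cdf(np.log(demand), loc=np.log(theta_0), scale=theta_1)
        raise ValueError(f'unsupported family: {family}')
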
- for i_ls, LS in enumerate(limit_states):
+ for i_ls, LS in enumerate(limit_states): # noqa: N806
if comp_data.loc[(LS, 'Family')] == 'normal':
cdf_vals = norm.cdf(
demand_vals,
@@ -170,7 +170,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
x=demand_vals,
y=cdf_vals,
mode='lines',
- line=dict(width=3, color=colors[LS_count][i_ls]),
+ line=dict(width=3, color=colors[LS_count][i_ls]), # noqa: C408
name=LS,
),
row=1,
@@ -187,7 +187,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
0,
],
mode='lines',
- line=dict(width=3, color=colors[1][0]),
+ line=dict(width=3, color=colors[1][0]), # noqa: C408
name='Incomplete Fragility Data',
),
row=1,
@@ -196,9 +196,9 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
table_vals = []
- for LS in limit_states:
+ for LS in limit_states: # noqa: N806
if (
- np.all(
+ np.all( # noqa: E712
pd.isna(
comp_data[LS][
['Theta_0', 'Family', 'Theta_1', 'DamageStateWeights']
@@ -207,7 +207,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
)
== False
):
- table_vals.append(
+ table_vals.append( # noqa: PERF401
np.insert(
comp_data[LS][
['Theta_0', 'Family', 'Theta_1', 'DamageStateWeights']
@@ -222,7 +222,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
ds_list = []
ds_i = 1
for dsw in table_vals[-1]:
- if pd.isna(dsw) == True:
+ if pd.isna(dsw) == True: # noqa: E712
ds_list.append(f'DS{ds_i}')
ds_i += 1
@@ -243,13 +243,13 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
table_vals[1] = np.array(ds_list)
font_size = 16
- if ds_i > 8:
+ if ds_i > 8: # noqa: PLR2004
font_size = 8.5
fig.add_trace(
go.Table(
columnwidth=[50, 70, 65, 95, 80],
- header=dict(
+ header=dict( # noqa: C408
values=[
'Limit<br>State',
'Damage State(s)',
@@ -258,17 +258,17 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
' Capacity<br>Dispersion',
],
align=['center', 'left', 'center', 'center', 'center'],
- fill=dict(color='rgb(200,200,200)'),
- line=dict(color='black'),
- font=dict(color='black', size=16),
+ fill=dict(color='rgb(200,200,200)'), # noqa: C408
+ line=dict(color='black'), # noqa: C408
+ font=dict(color='black', size=16), # noqa: C408
),
- cells=dict(
+ cells=dict( # noqa: C408
values=table_vals,
height=30,
align=['center', 'left', 'center', 'center', 'center'],
- fill=dict(color='rgba(0,0,0,0)'),
- line=dict(color='black'),
- font=dict(color='black', size=font_size),
+ fill=dict(color='rgba(0,0,0,0)'), # noqa: C408
+ line=dict(color='black'), # noqa: C408
+ font=dict(color='black', size=font_size), # noqa: C408
),
),
row=1,
@@ -280,14 +280,14 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
ds_offset = 0.086
info_font_size = 10
- if ds_i > 8:
+ if ds_i > 8: # noqa: PLR2004
x_loc = 0.4928
y_loc = 0.705 + 0.123
ds_offset = 0.0455
info_font_size = 9
for i_ls, ds_desc in enumerate(ds_list):
- if comp_meta != None:
+ if comp_meta != None: # noqa: E711
ls_meta = comp_meta['LimitStates'][f'LS{i_ls + 1}']
y_loc = y_loc - 0.123
@@ -295,17 +295,17 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
if '<br>' in ds_desc:
    ds_vals = ds_desc.split('<br>')
- for i_ds, ds_name in enumerate(ds_vals):
+ for i_ds, ds_name in enumerate(ds_vals): # noqa: B007
ds_id = list(ls_meta.keys())[i_ds]
- if ls_meta[ds_id].get('Description', False) != False:
+ if ls_meta[ds_id].get('Description', False) != False: # noqa: E712
ds_description = '<br>'.join(
wrap(ls_meta[ds_id]['Description'], width=70)
)
else:
ds_description = ''
- if ls_meta[ds_id].get('RepairAction', False) != False:
+ if ls_meta[ds_id].get('RepairAction', False) != False: # noqa: E712
ds_repair = '<br>'.join(
wrap(ls_meta[ds_id]['RepairAction'], width=70)
)
@@ -328,7 +328,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
ayref='pixel',
xanchor='left',
yanchor='bottom',
- font=dict(size=info_font_size),
+ font=dict(size=info_font_size), # noqa: C408
showarrow=False,
ax=0,
ay=0,
@@ -340,16 +340,16 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
else:
# assuming a single Damage State
- ds_id = list(ls_meta.keys())[0]
+ ds_id = list(ls_meta.keys())[0] # noqa: RUF015
- if ls_meta[ds_id].get('Description', False) != False:
+ if ls_meta[ds_id].get('Description', False) != False: # noqa: E712
ds_description = '<br>'.join(
wrap(ls_meta[ds_id]['Description'], width=70)
)
else:
ds_description = ''
- if ls_meta[ds_id].get('RepairAction', False) != False:
+ if ls_meta[ds_id].get('RepairAction', False) != False: # noqa: E712
ds_repair = '
'.join(
wrap(ls_meta[ds_id]['RepairAction'], width=70)
)
@@ -370,7 +370,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
ayref='pixel',
xanchor='left',
yanchor='bottom',
- font=dict(size=info_font_size),
+ font=dict(size=info_font_size), # noqa: C408
showarrow=False,
ax=0,
ay=0,
@@ -378,7 +378,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
y=y_loc,
)
- shared_ax_props = dict(
+ shared_ax_props = dict( # noqa: C408
showgrid=True,
linecolor='black',
gridwidth=0.05,
@@ -399,7 +399,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
fig.update_layout(
# title = f'{comp_id}',
- margin=dict(b=5, r=5, l=5, t=5),
+ margin=dict(b=5, r=5, l=5, t=5), # noqa: C408
height=300,
width=950,
paper_bgcolor='rgba(0,0,0,0)',
@@ -407,7 +407,7 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
showlegend=False,
)
- with open(f'{output_path}/{comp_id}.html', 'w') as f:
+ with open(f'{output_path}/{comp_id}.html', 'w') as f: # noqa: PTH123
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
# store the source database file(s) in the output directory for future reference
@@ -419,17 +419,17 @@ def plot_fragility(comp_db_path, output_path, create_zip='0'):
if create_zip == '1':
files = [f'{output_path}/{file}' for file in os.listdir(output_path)]
- with ZipFile(output_path + '.zip', 'w') as zip:
+ with ZipFile(output_path + '.zip', 'w') as zip: # noqa: A001
for file in files:
zip.write(file, arcname=Path(file).name)
shutil.rmtree(output_path)
- print('Successfully generated component vulnerability figures.')
+ print('Successfully generated component vulnerability figures.') # noqa: T201
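The E711/E712 suppressions above keep literal `== True` / `!= None` comparisons in place rather than rewriting them. For reference, a minimal sketch of the idiomatic forms those rules point to, with stand-in values in place of the script's data:

```python
# Stand-ins for a DamageStateWeights entry and the metadata dict;
# the flagged forms were `pd.isna(dsw) == True` and `comp_meta != None`.
import pandas as pd

dsw = float('nan')
comp_meta = None

if pd.isna(dsw):             # E712: pd.isna already returns a bool here
    print('missing damage state weight')
if comp_meta is not None:    # E711: identity check for None
    print('metadata available')
```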
-def plot_repair(comp_db_path, output_path, create_zip='0'):
- # TODO:
+def plot_repair(comp_db_path, output_path, create_zip='0'): # noqa: C901, D103, PLR0912, PLR0915
+ # TODO: # noqa: TD002
# change limit_states names
if create_zip == '1':
@@ -438,7 +438,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
# initialize the output dir
# if exists, remove it
- if os.path.exists(output_path):
+ if os.path.exists(output_path): # noqa: PTH110
shutil.rmtree(output_path)
# then create it
@@ -454,7 +454,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
# check if the metadata is there and open it
if Path(comp_db_meta).is_file():
- with open(comp_db_meta) as f:
+ with open(comp_db_meta) as f: # noqa: PTH123
repair_meta = json.load(f)
else:
# otherwise, assign None to facilitate checks later
@@ -468,8 +468,8 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
comp_data = repair_df.loc[(comp_id, c_type)]
# and the component-specific metadata - if it exists
- if repair_meta != None:
- if comp_id in repair_meta.keys():
+ if repair_meta != None: # noqa: E711
+ if comp_id in repair_meta.keys(): # noqa: SIM118
comp_meta = repair_meta[comp_id]
else:
comp_meta = None
@@ -503,17 +503,17 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
]
# check for each limit state
- for LS in limit_states:
+ for LS in limit_states: # noqa: N806
fields = ['Theta_0', 'Family', 'Theta_1']
- comp_data_LS = comp_data[LS]
+ comp_data_LS = comp_data[LS] # noqa: N806
for optional_label in ['Family', 'Theta_1']:
if optional_label not in comp_data_LS.index:
comp_data_LS[optional_label] = None
# if any of the fields above is set
- if np.all(pd.isna(comp_data_LS[fields].values)) == False:
+ if np.all(pd.isna(comp_data_LS[fields].values)) == False: # noqa: E712
# Then we assume that is valuable information that needs to be
# shown in the table while the other fields will show 'null'
table_vals.append(np.insert(comp_data_LS[fields].values, 0, LS))
@@ -528,44 +528,44 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
for ds_i, val in enumerate(table_vals[1]):
if '|' in str(val):
table_vals[1][ds_i] = 'varies'
- elif pd.isna(val) == True:
+ elif pd.isna(val) == True: # noqa: E712
table_vals[1][ds_i] = 'N/A'
else:
conseq_val = float(val)
if conseq_val < 1:
table_vals[1][ds_i] = f'{conseq_val:.4g}'
- elif conseq_val < 10:
+ elif conseq_val < 10: # noqa: PLR2004
table_vals[1][ds_i] = f'{conseq_val:.3g}'
- elif conseq_val < 1e6:
+ elif conseq_val < 1e6: # noqa: PLR2004
table_vals[1][ds_i] = f'{conseq_val:.0f}'
else:
table_vals[1][ds_i] = f'{conseq_val:.3g}'
# round dispersion parameters to 2 digits
table_vals[-1] = [
- f'{float(sig):.2f}' if pd.isna(sig) == False else 'N/A'
+ f'{float(sig):.2f}' if pd.isna(sig) == False else 'N/A' # noqa: E712
for sig in table_vals[-1]
]
# replace missing distribution labels with N/A
table_vals[-2] = [
- family if pd.isna(family) == False else 'N/A'
+ family if pd.isna(family) == False else 'N/A' # noqa: E712
for family in table_vals[-2]
]
# converted simultaneous damage models might have a lot of DSs
- if table_vals.shape[1] > 8:
+ if table_vals.shape[1] > 8: # noqa: PLR2004
lots_of_ds = True
else:
lots_of_ds = False
# set the font size
- font_size = 16 if lots_of_ds == False else 11
+ font_size = 16 if lots_of_ds == False else 11 # noqa: E712
# create the table
# properties shared between consequence types
- c_pad = (9 - len(c_type)) * ' '
+ c_pad = (9 - len(c_type)) * ' ' # noqa: F841
table_header = [
                'Damage<br>State',
                'Median<br>Conseq.',
@@ -578,20 +578,20 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
fig.add_trace(
go.Table(
columnwidth=column_widths,
- header=dict(
+ header=dict( # noqa: C408
values=table_header,
align=cell_alignment,
- fill=dict(color='rgb(200,200,200)'),
- line=dict(color='black'),
- font=dict(color='black', size=16),
+ fill=dict(color='rgb(200,200,200)'), # noqa: C408
+ line=dict(color='black'), # noqa: C408
+ font=dict(color='black', size=16), # noqa: C408
),
- cells=dict(
+ cells=dict( # noqa: C408
values=table_vals,
- height=30 if lots_of_ds == False else 19,
+ height=30 if lots_of_ds == False else 19, # noqa: E712
align=cell_alignment,
- fill=dict(color='rgba(0,0,0,0)'),
- line=dict(color='black'),
- font=dict(color='black', size=font_size),
+ fill=dict(color='rgba(0,0,0,0)'), # noqa: C408
+ line=dict(color='black'), # noqa: C408
+ font=dict(color='black', size=font_size), # noqa: C408
),
),
row=1,
@@ -622,7 +622,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
if comp_data.loc[('Incomplete', '')] != 1:
# set the parameters for displaying uncertainty
- p_min, p_max = 0.16, 0.84 # +- 1 std
+ p_min, p_max = 0.16, 0.84 # +- 1 std # noqa: F841
# initialize quantity limits
q_min = 0
@@ -647,13 +647,13 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
q_max = 1.0
# anchor locations for annotations providing DS information
- x_loc = 0.533 if lots_of_ds == False else 0.535
- y_space = 0.088 if lots_of_ds == False else 0.0543
- y_loc = 0.784 + y_space if lots_of_ds == False else 0.786 + y_space
- info_font_size = 10 if lots_of_ds == False else 9
+ x_loc = 0.533 if lots_of_ds == False else 0.535 # noqa: E712
+ y_space = 0.088 if lots_of_ds == False else 0.0543 # noqa: E712
+ y_loc = 0.784 + y_space if lots_of_ds == False else 0.786 + y_space # noqa: E712
+ info_font_size = 10 if lots_of_ds == False else 9 # noqa: E712
# x anchor for annotations providing median function data
- x_loc_func = 0.697 if lots_of_ds == False else 0.689
+ x_loc_func = 0.697 if lots_of_ds == False else 0.689 # noqa: E712
need_x_axis = False
@@ -696,7 +696,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
x=q_vals,
y=c_vals,
mode='lines',
- line=dict(
+ line=dict( # noqa: C408
width=3,
color=colors[np.min([len(model_params[1]), 7])][
ds_i % 7
@@ -711,7 +711,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
# check if dispersion is prescribed for this consequence
dispersion = model_params[3][ds_i]
- if (pd.isna(dispersion) == False) and (dispersion != 'N/A'):
+ if (pd.isna(dispersion) == False) and (dispersion != 'N/A'): # noqa: E712
dispersion = float(dispersion)
if model_params[2][ds_i] == 'normal':
@@ -735,7 +735,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
x=q_vals,
y=std_plus,
mode='lines',
- line=dict(
+ line=dict( # noqa: C408
width=1,
color=colors[np.min([len(model_params[1]), 7])][
ds_i % 7
@@ -755,7 +755,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
x=q_vals,
y=std_minus,
mode='lines',
- line=dict(
+ line=dict( # noqa: C408
width=1,
color=colors[np.min([len(model_params[1]), 7])][
ds_i % 7
@@ -813,7 +813,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
x=c_pdf,
y=q_pdf,
mode='lines',
- line=dict(
+ line=dict( # noqa: C408
width=1,
color=colors[np.min([len(model_params[1]), 7])][
ds_i % 7
@@ -848,7 +848,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
ayref='pixel',
xanchor='left',
yanchor='bottom',
- font=dict(size=info_font_size),
+ font=dict(size=info_font_size), # noqa: C408
showarrow=False,
ax=0,
ay=0,
@@ -857,17 +857,17 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
)
# check if metadata is available
- if comp_meta != None:
+ if comp_meta != None: # noqa: E711
ds_meta = comp_meta['DamageStates'][f'DS{ds_i + 1}']
- if ds_meta.get('Description', False) != False:
+ if ds_meta.get('Description', False) != False: # noqa: E712
                        ds_description = '<br>'.join(
wrap(ds_meta['Description'], width=55)
)
else:
ds_description = ''
- if ds_meta.get('RepairAction', False) != False:
+ if ds_meta.get('RepairAction', False) != False: # noqa: E712
                        ds_repair = '<br>'.join(
wrap(ds_meta['RepairAction'], width=55)
)
@@ -890,7 +890,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
ayref='pixel',
xanchor='left',
yanchor='bottom',
- font=dict(size=info_font_size),
+ font=dict(size=info_font_size), # noqa: C408
showarrow=False,
ax=0,
ay=0,
@@ -909,14 +909,14 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
0,
],
mode='lines',
- line=dict(width=3, color=colors[1][0]),
+ line=dict(width=3, color=colors[1][0]), # noqa: C408
name=f'Incomplete Repair {c_type} Consequence Data',
),
row=1,
col=2,
)
- shared_ax_props = dict(
+ shared_ax_props = dict( # noqa: C408
showgrid=True,
linecolor='black',
gridwidth=0.05,
@@ -936,7 +936,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
# layout settings
fig.update_layout(
# minimize margins
- margin=dict(b=50, r=5, l=5, t=5),
+ margin=dict(b=50, r=5, l=5, t=5), # noqa: C408
# height and width targets single-column web view
height=400,
width=950,
@@ -950,24 +950,24 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
range=[q_min, q_max],
**shared_ax_props,
)
- if need_x_axis == True
- else dict(showgrid=False, showticklabels=False),
+ if need_x_axis == True # noqa: E712
+ else dict(showgrid=False, showticklabels=False), # noqa: C408
yaxis1=dict(
title_text=f'{c_type} [{dv_unit}]',
rangemode='tozero',
**shared_ax_props,
),
- xaxis2=dict(
+ xaxis2=dict( # noqa: C408
showgrid=False,
showticklabels=False,
title_text='',
),
- yaxis2=dict(showgrid=False, showticklabels=False),
+ yaxis2=dict(showgrid=False, showticklabels=False), # noqa: C408
# position legend to top of the figure
- legend=dict(
+ legend=dict( # noqa: C408
yanchor='top',
xanchor='right',
- font=dict(size=12),
+ font=dict(size=12), # noqa: C408
orientation='v',
y=1.0,
x=-0.08,
@@ -975,7 +975,7 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
)
# save figure to html
- with open(f'{output_path}/{comp_id}-{c_type}.html', 'w') as f:
+ with open(f'{output_path}/{comp_id}-{c_type}.html', 'w') as f: # noqa: PTH123
# Minimize size by not saving javascript libraries which means
# internet connection is required to view the figure.
f.write(fig.to_html(full_html=False, include_plotlyjs='cdn'))
@@ -989,18 +989,18 @@ def plot_repair(comp_db_path, output_path, create_zip='0'):
if create_zip == '1':
files = [f'{output_path}/{file}' for file in os.listdir(output_path)]
- with ZipFile(output_path + '.zip', 'w') as zip:
+ with ZipFile(output_path + '.zip', 'w') as zip: # noqa: A001
for file in files:
zip.write(file, arcname=Path(file).name)
shutil.rmtree(output_path)
- print('Successfully generated component repair consequence figures.')
+ print('Successfully generated component repair consequence figures.') # noqa: T201
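A001 flags the `with ZipFile(...) as zip:` handle in both zipping loops for shadowing the built-in `zip()`. A hedged sketch of the same archive step with a non-shadowing name and pathlib; the paths are illustrative:

```python
# Illustrative paths; `archive` avoids shadowing the zip() builtin (A001).
from pathlib import Path
from zipfile import ZipFile

output_path = Path('figures_demo')
output_path.mkdir(exist_ok=True)
(output_path / 'example.html').write_text('<html></html>')

with ZipFile(f'{output_path}.zip', 'w') as archive:
    for file in output_path.iterdir():
        archive.write(file, arcname=file.name)
```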
-def check_diff(comp_db_path, output_path):
+def check_diff(comp_db_path, output_path): # noqa: D103
# if the output path already exists
- if os.path.exists(output_path):
+ if os.path.exists(output_path): # noqa: PTH110
# check for both the csv and json files
for ext in ['csv', 'json']:
comp_db = comp_db_path[:-3] + ext
@@ -1015,7 +1015,7 @@ def check_diff(comp_db_path, output_path):
# check if a file with the same name exists in the output dir
if comp_db in os.listdir(output_path):
# open the two files and compare them
- with open(Path(source_path) / comp_db) as f1, open(
+ with open(Path(source_path) / comp_db) as f1, open( # noqa: PTH123
Path(output_path) / comp_db
) as f2:
if ext == 'csv':
@@ -1028,7 +1028,7 @@ def check_diff(comp_db_path, output_path):
continue
# if at least one line does not match, we need to generate
- else:
+ else: # noqa: RET507
return True
elif ext == 'json':
@@ -1040,7 +1040,7 @@ def check_diff(comp_db_path, output_path):
continue
# otherwise, we need to generate
- else:
+ else: # noqa: RET507
return True
# if there is no db file in the output dir, we need to generate
@@ -1051,11 +1051,11 @@ def check_diff(comp_db_path, output_path):
return False
# if the output path does not exist, we need to generate
- else:
+ else: # noqa: RET505
return True
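RET505/RET507 mark the `else` branches after `return` and `continue` in `check_diff` as redundant. A small sketch of the flattened control flow, with a hypothetical helper standing in for the csv comparison; returning True means the figures must be regenerated:

```python
# Hypothetical helper standing in for check_diff's line-by-line comparison.
def needs_regeneration(src_lines, out_lines):
    for src, out in zip(src_lines, out_lines):
        if src == out:
            continue
        return True   # RET507: no `else` needed after `continue`
    return False      # RET505: no `else` needed after `return`

print(needs_regeneration(['a', 'b'], ['a', 'b']))  # False
print(needs_regeneration(['a', 'b'], ['a', 'c']))  # True
```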
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('viz_type')
parser.add_argument('comp_db_path')
@@ -1072,7 +1072,7 @@ def main(args):
# verify that comp_db_path points to a file
if not Path(comp_db_path).is_file():
- raise FileNotFoundError('comp_db_path does not point to a file.')
+ raise FileNotFoundError('comp_db_path does not point to a file.') # noqa: EM101, TRY003
if check_diff(comp_db_path, output_path):
if args.viz_type == 'fragility':
@@ -1082,11 +1082,11 @@ def main(args):
plot_repair(comp_db_path, output_path, args.zip)
else:
- print('No need to generate, figures already exist in the output folder.')
+ print('No need to generate, figures already exist in the output folder.') # noqa: T201
elif args.viz_type == 'query':
if args.comp_db_path == 'default_db':
- print(pelicun_path)
+ print(pelicun_path) # noqa: T201
# print("--- %s seconds ---" % (time.time() - start_time))
diff --git a/modules/performDL/pelicun3/HDF_to_CSV.py b/modules/performDL/pelicun3/HDF_to_CSV.py
index ecdb5cc33..88bdd59dc 100644
--- a/modules/performDL/pelicun3/HDF_to_CSV.py
+++ b/modules/performDL/pelicun3/HDF_to_CSV.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -43,15 +43,15 @@
import pandas as pd
-def convert_HDF(HDF_path):
- HDF_ext = HDF_path.split('.')[-1]
- CSV_base = HDF_path[: -len(HDF_ext) - 1]
+def convert_HDF(HDF_path): # noqa: N802, N803, D103
+ HDF_ext = HDF_path.split('.')[-1] # noqa: N806
+ CSV_base = HDF_path[: -len(HDF_ext) - 1] # noqa: N806
- HDF_path = Path(HDF_path).resolve()
+ HDF_path = Path(HDF_path).resolve() # noqa: N806
store = pd.HDFStore(HDF_path)
- for key in store.keys():
+ for key in store.keys(): # noqa: SIM118
store[key].to_csv(f'{CSV_base}_{key[1:].replace("/", "_")}.csv')
store.close()
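`convert_HDF` closes the store manually; `pd.HDFStore` also works as a context manager, which closes it even if a key fails to export. A sketch under that assumption (writing the demo file requires the PyTables backend, `pip install tables`):

```python
# Demo file and names are illustrative, not the module's real inputs.
from pathlib import Path

import pandas as pd

hdf_path = Path('demo.h5')
pd.DataFrame({'a': [1, 2]}).to_hdf(hdf_path, key='results')

csv_base = hdf_path.stem
with pd.HDFStore(hdf_path, mode='r') as store:   # closed even on error
    for key in store.keys():                     # keys look like '/results'
        store[key].to_csv(f'{csv_base}_{key[1:].replace("/", "_")}.csv')
```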
diff --git a/modules/performDL/pelicun3/pelicun3_wrapper.py b/modules/performDL/pelicun3/pelicun3_wrapper.py
index e0c5552ac..f94c9f218 100644
--- a/modules/performDL/pelicun3/pelicun3_wrapper.py
+++ b/modules/performDL/pelicun3/pelicun3_wrapper.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
diff --git a/modules/performFEM/surrogateGP/gpPredict.py b/modules/performFEM/surrogateGP/gpPredict.py
index 9b08bfa2f..5582a5414 100644
--- a/modules/performFEM/surrogateGP/gpPredict.py
+++ b/modules/performFEM/surrogateGP/gpPredict.py
@@ -1,6 +1,6 @@
-import json as json
+import json as json # noqa: INP001, D100, PLC0414
import os
-import pickle as pickle
+import pickle as pickle # noqa: PLC0414
import shutil
import subprocess
import sys
@@ -10,40 +10,40 @@
from scipy.stats import lognorm, norm
try:
- moduleName = 'GPy'
- import GPy as GPy
-except:
- print(
+ moduleName = 'GPy' # noqa: N816
+ import GPy as GPy # noqa: PLC0414
+except: # noqa: E722
+ print( # noqa: T201
        'Error running surrogate prediction - Failed to import module: Surrogate modeling module uses the GPy python package which is facing a version compatibility issue at this moment (01.05.2024). To use the surrogate module, one needs to manually update the GPy version to 1.13. The instructions can be found in the documentation: https://nheri-simcenter.github.io/quoFEM-Documentation/common/user_manual/usage/desktop/SimCenterUQSurrogate.html#lblsimsurrogate'
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
try:
- moduleName = 'GPy'
- import GPy as GPy
+ moduleName = 'GPy' # noqa: N816
+ import GPy as GPy # noqa: PLC0414
- moduleName = 'emukit'
+ moduleName = 'emukit' # noqa: N816
from emukit.multi_fidelity.convert_lists_to_array import (
convert_x_list_to_array,
)
- moduleName = 'Pandas'
+ moduleName = 'Pandas' # noqa: N816
import pandas as pd
error_tag = False # global variable
-except:
+except: # noqa: E722
error_tag = True
- print(
+ print( # noqa: T201
'Error running surrogate prediction - Failed to import module:' + moduleName
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
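PLR1722 flags the bare `exit(-1)` calls, which rely on the `site` module being loaded; `sys.exit` is the form the rule points to. A minimal sketch of the import guard, also narrowing the bare `except:` (E722) to `ImportError`; the message is shortened for the sketch:

```python
import sys

try:
    import GPy  # noqa: F401
except ImportError:
    print('Error running surrogate prediction - failed to import GPy')
    sys.exit(-1)
```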
# from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array, convert_xy_lists_to_arrays
-def main(params_dir, surrogate_dir, json_dir, result_file, input_json):
- global error_file
+def main(params_dir, surrogate_dir, json_dir, result_file, input_json): # noqa: C901, D103, PLR0912, PLR0915
+ global error_file # noqa: PLW0602
os_type = sys.platform.lower()
run_type = 'runninglocal'
@@ -52,11 +52,11 @@ def main(params_dir, surrogate_dir, json_dir, result_file, input_json):
# create a log file
#
- msg0 = os.path.basename(os.getcwd()) + ' : '
- file_object = open('surrogateLog.log', 'a')
+ msg0 = os.path.basename(os.getcwd()) + ' : ' # noqa: PTH109, PTH119
+ file_object = open('surrogateLog.log', 'a') # noqa: SIM115, PTH123
- folderName = os.path.basename(os.getcwd())
- sampNum = folderName.split('.')[-1]
+ folderName = os.path.basename(os.getcwd()) # noqa: PTH109, PTH119, N806
+ sampNum = folderName.split('.')[-1] # noqa: N806
#
# read json -- current input file
@@ -67,26 +67,26 @@ def error_exit(msg):
error_file.close()
file_object.write(msg0 + msg) # global file
file_object.close()
- print(msg)
- exit(-1)
+ print(msg) # noqa: T201
+ exit(-1) # noqa: PLR1722
def error_warning(msg):
# error_file.write(msg)
file_object.write(msg)
# print(msg)
- if not os.path.exists(json_dir):
+ if not os.path.exists(json_dir): # noqa: PTH110
msg = 'Error in surrogate prediction: File not found -' + json_dir
error_exit(msg)
- with open(json_dir) as f:
+ with open(json_dir) as f: # noqa: PTH123
try:
sur = json.load(f)
except ValueError:
msg = 'invalid json format: ' + json_dir
error_exit(msg)
- isEEUQ = sur['isEEUQ']
+ isEEUQ = sur['isEEUQ'] # noqa: N806
if isEEUQ:
dakota_path = 'sc_scInput.json'
@@ -94,14 +94,14 @@ def error_warning(msg):
dakota_path = input_json
try:
- with open(dakota_path) as f: # current input file
+ with open(dakota_path) as f: # current input file # noqa: PTH123
inp_tmp = json.load(f)
- except:
+ except: # noqa: E722
try:
# current input file
- with open('sc_inputRWHALE.json') as f:
+ with open('sc_inputRWHALE.json') as f: # noqa: PTH123
inp_tmp = json.load(f)
- except:
+ except: # noqa: S110, E722
pass
try:
@@ -109,17 +109,17 @@ def error_warning(msg):
inp_fem = inp_tmp['Applications']['Modeling']
else:
inp_fem = inp_tmp['FEM']
- except:
+ except: # noqa: E722
inp_fem = {}
- print('invalid json format - dakota.json')
+ print('invalid json format - dakota.json') # noqa: T201
norm_var_thr = inp_fem.get('varThres', 0.02)
when_inaccurate = inp_fem.get('femOption', 'continue')
- do_mf = False
+ do_mf = False # noqa: F841
myseed = inp_fem.get('gpSeed', None)
prediction_option = inp_fem.get('predictionOption', 'random')
- if myseed == None:
- folderName = os.path.basename(os.path.dirname(os.getcwd()))
+ if myseed == None: # noqa: E711
+ folderName = os.path.basename(os.path.dirname(os.getcwd())) # noqa: PTH109, PTH119, PTH120, N806
myseed = int(folderName) * int(1.0e7)
np.random.seed(int(myseed) + int(sampNum))
@@ -143,32 +143,32 @@ def error_warning(msg):
elif kernel == 'Matern 3/2':
kern_name = 'Mat32'
elif kernel == 'Matern 5/2':
- kern_name = 'Mat52'
+ kern_name = 'Mat52' # noqa: F841
did_mf = sur['doMultiFidelity']
# from json
- g_name_sur = list()
+ g_name_sur = list() # noqa: C408
ng_sur = 0
- Y = np.zeros((sur['highFidelityInfo']['valSamp'], sur['ydim']))
+ Y = np.zeros((sur['highFidelityInfo']['valSamp'], sur['ydim'])) # noqa: N806
for g in sur['ylabels']:
g_name_sur += [g]
Y[:, ng_sur] = np.array(sur['yExact'][g])
ng_sur += 1
- rv_name_sur = list()
+ rv_name_sur = list() # noqa: C408
nrv_sur = 0
- X = np.zeros((sur['highFidelityInfo']['valSamp'], sur['xdim']))
+ X = np.zeros((sur['highFidelityInfo']['valSamp'], sur['xdim'])) # noqa: N806
for rv in sur['xlabels']:
rv_name_sur += [rv]
X[:, nrv_sur] = np.array(sur['xExact'][rv])
nrv_sur += 1
try:
- constIdx = sur['highFidelityInfo']['constIdx']
- constVal = sur['highFidelityInfo']['constVal']
- except:
- constIdx = []
- constVal = []
+ constIdx = sur['highFidelityInfo']['constIdx'] # noqa: N806
+ constVal = sur['highFidelityInfo']['constVal'] # noqa: N806
+ except: # noqa: E722
+ constIdx = [] # noqa: N806
+ constVal = [] # noqa: N806
# Read pickles
@@ -184,14 +184,14 @@ def decorator(func):
return decorator
@monkeypatch_method(GPy.likelihoods.Gaussian)
- def gaussian_variance(self, Y_metadata=None):
+ def gaussian_variance(self, Y_metadata=None): # noqa: N803
if Y_metadata is None:
return self.variance
- else:
+ else: # noqa: RET505
return self.variance * Y_metadata['variance_structure']
@monkeypatch_method(GPy.core.GP)
- def set_XY2(self, X=None, Y=None, Y_metadata=None):
+ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
if Y_metadata is not None:
if self.Y_metadata is None:
self.Y_metadata = Y_metadata
@@ -201,24 +201,24 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None):
self.set_XY(X, Y)
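The `@monkeypatch_method(...)` decorator used here attaches replacement methods onto GPy classes at runtime. A self-contained sketch of the pattern, with a `Demo` class standing in for `GPy.core.GP`; the decorator body matches the standard form of this idiom:

```python
def monkeypatch_method(cls):
    def decorator(func):
        setattr(cls, func.__name__, func)  # bind under the function's name
        return func
    return decorator

class Demo:  # stand-in for GPy.core.GP
    pass

@monkeypatch_method(Demo)
def greet(self):
    return 'patched in'

print(Demo().greet())  # -> patched in
```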
- def get_stochastic_variance(X, Y, x, ny):
+ def get_stochastic_variance(X, Y, x, ny): # noqa: N803
# X_unique, X_idx, indices, counts = np.unique(X, axis=0, return_index=True, return_counts=True, return_inverse=True)
- X_unique, dummy, indices, counts = np.unique(
+ X_unique, dummy, indices, counts = np.unique( # noqa: N806
X, axis=0, return_index=True, return_counts=True, return_inverse=True
)
- idx_repl = [i for i in np.where(counts > 1)[0]]
+ idx_repl = [i for i in np.where(counts > 1)[0]] # noqa: C416
if len(idx_repl) > 0:
n_unique = X_unique.shape[0]
- Y_mean, Y_var = np.zeros((n_unique, 1)), np.zeros((n_unique, 1))
+ Y_mean, Y_var = np.zeros((n_unique, 1)), np.zeros((n_unique, 1)) # noqa: N806
for idx in range(n_unique):
- Y_subset = Y[[i for i in np.where(indices == idx)[0]], :]
+ Y_subset = Y[[i for i in np.where(indices == idx)[0]], :] # noqa: C416, N806
Y_mean[idx, :] = np.mean(Y_subset, axis=0)
Y_var[idx, :] = np.var(Y_subset, axis=0)
- if (np.max(Y_var) / np.var(Y_mean) < 1.0e-10) and len(idx_repl) > 0:
+ if (np.max(Y_var) / np.var(Y_mean) < 1.0e-10) and len(idx_repl) > 0: # noqa: PLR2004
return np.ones((X.shape[0], 1))
kernel_var = GPy.kern.Matern52(
@@ -233,16 +233,16 @@ def get_stochastic_variance(X, Y, x, ny):
Y_metadata=None,
)
# print("Collecting variance field of ny={}".format(ny))
- for key, val in sur['modelInfo'][g_name_sur[ny] + '_Var'].items():
- exec('m_var.' + key + '= np.array(val)')
+ for key, val in sur['modelInfo'][g_name_sur[ny] + '_Var'].items(): # noqa: B007, PERF102
+ exec('m_var.' + key + '= np.array(val)') # noqa: S102
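S102 flags the `exec()` calls that rebuild saved model parameters from strings. A hedged alternative, assuming the saved keys are dotted attribute paths such as `'kern.lengthscale'`: walk to the owning object and assign with `setattr`. Whether GPy's parameter objects accept plain attribute assignment this way is an assumption; the stand-in classes only illustrate the mechanics.

```python
import numpy as np

def set_dotted(obj, dotted_key, val):
    *parents, leaf = dotted_key.split('.')
    for name in parents:
        obj = getattr(obj, name)       # walk to the owning object
    setattr(obj, leaf, np.array(val))  # assign without exec()

class Kern:    # stand-in for a GPy kernel
    lengthscale = None

class Model:   # stand-in for a GPy model
    kern = Kern()

m = Model()
set_dotted(m, 'kern.lengthscale', [1.0, 2.0])
print(m.kern.lengthscale)  # -> [1. 2.]
```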
log_var_pred, dum = m_var.predict(X_unique)
var_pred = np.exp(log_var_pred)
if did_normalization:
- Y_normFact = np.var(Y_mean)
+ Y_normFact = np.var(Y_mean) # noqa: N806
else:
- Y_normFact = 1
+ Y_normFact = 1 # noqa: N806
norm_var_str = (
(var_pred.T[0]) / Y_normFact
@@ -252,8 +252,8 @@ def get_stochastic_variance(X, Y, x, ny):
nugget_var_pred_x = np.exp(log_var_pred_x.T[0]) / Y_normFact
else:
- X_unique = X
- Y_mean = Y
+ X_unique = X # noqa: N806
+ Y_mean = Y # noqa: N806
indices = range(Y.shape[0])
kernel_var = GPy.kern.Matern52(
@@ -267,16 +267,16 @@ def get_stochastic_variance(X, Y, x, ny):
)
# print("Variance field obtained for ny={}".format(ny))
- for key, val in sur['modelInfo'][g_name_sur[ny] + '_Var'].items():
- exec('m_var.' + key + '= np.array(val)')
+ for key, val in sur['modelInfo'][g_name_sur[ny] + '_Var'].items(): # noqa: B007, PERF102
+ exec('m_var.' + key + '= np.array(val)') # noqa: S102
log_var_pred, dum = m_var.predict(X)
var_pred = np.exp(log_var_pred)
if did_normalization:
- Y_normFact = np.var(Y)
+ Y_normFact = np.var(Y) # noqa: N806
else:
- Y_normFact = 1
+ Y_normFact = 1 # noqa: N806
norm_var_str = (
(var_pred.T[0]) / Y_normFact
@@ -300,7 +300,7 @@ def get_stochastic_variance(X, Y, x, ny):
id_vec = []
rv_name_dummy = []
- t_total = time.process_time()
+ t_total = time.process_time() # noqa: F841
first_rv_found = False
first_dummy_found = False
@@ -308,7 +308,7 @@ def get_stochastic_variance(X, Y, x, ny):
# Check how many RVs overlap
#
- with open(params_dir) as x_file:
+ with open(params_dir) as x_file: # noqa: PTH123
data = x_file.readlines()
nrv = int(data[0])
for i in range(nrv):
@@ -317,7 +317,7 @@ def get_stochastic_variance(X, Y, x, ny):
# print(name)
# = pass if is string. GP cannot handle that
- if ((name == 'MultipleEvent') or (name == 'eventID')) and isEEUQ:
+ if ((name == 'MultipleEvent') or (name == 'eventID')) and isEEUQ: # noqa: PLR1714
continue
if (
@@ -385,25 +385,25 @@ def get_stochastic_variance(X, Y, x, ny):
# if eeuq
first_eeuq_found = False
- if sur.get('intensityMeasureInfo') != None:
- with open('IMinput.json', 'w') as f:
- mySurrogateJson = sur['intensityMeasureInfo']
+ if sur.get('intensityMeasureInfo') != None: # noqa: E711
+ with open('IMinput.json', 'w') as f: # noqa: PTH123
+ mySurrogateJson = sur['intensityMeasureInfo'] # noqa: N806
json.dump(mySurrogateJson, f)
- computeIM = os.path.join(
- os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ computeIM = os.path.join( # noqa: PTH118, N806
+ os.path.dirname( # noqa: PTH120
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
),
'createEVENT',
'groundMotionIM',
'IntensityMeasureComputer.py',
)
- pythonEXE = sys.executable
+ pythonEXE = sys.executable # noqa: N806
# compute IMs
- if os.path.exists('EVENT.json') and os.path.exists('IMinput.json'):
- os.system(
+ if os.path.exists('EVENT.json') and os.path.exists('IMinput.json'): # noqa: PTH110
+ os.system( # noqa: S605
f'{pythonEXE} {computeIM} --filenameAIM IMinput.json --filenameEVENT EVENT.json --filenameIM IM.json --geoMeanVar'
)
else:
@@ -411,15 +411,15 @@ def get_stochastic_variance(X, Y, x, ny):
error_exit(msg)
first_eeuq_found = False
- if os.path.exists('IM.csv'):
+ if os.path.exists('IM.csv'): # noqa: PTH110
# print("IM.csv found")
tmp1 = pd.read_csv(('IM.csv'), index_col=None)
if tmp1.empty:
# print("IM.csv in wordir.{} is empty.".format(cur_id))
return
- IMnames = list(map(str, tmp1))
- IMvals = tmp1.to_numpy()
+ IMnames = list(map(str, tmp1)) # noqa: N806
+ IMvals = tmp1.to_numpy() # noqa: N806
nrv2 = len(IMnames)
for i in range(nrv2):
name = IMnames[i]
@@ -443,16 +443,16 @@ def get_stochastic_variance(X, Y, x, ny):
if ns != nsamp:
msg = 'Error importing input data: sample size in params.in is not consistent.'
error_exit(msg)
- # TODO: fix for different nys m
+ # TODO: fix for different nys m # noqa: TD002
if len(id_vec + id_vec2) != nrv_sur:
- missing_ids = set([i for i in range(len(rv_name_sur))]) - set(
+ missing_ids = set([i for i in range(len(rv_name_sur))]) - set( # noqa: C403, C416
id_vec + id_vec2
)
- s = [str(rv_name_sur[id]) for id in missing_ids]
+ s = [str(rv_name_sur[id]) for id in missing_ids] # noqa: A001
if first_eeuq_found and all(
- [missingEDP.endswith('-2') for missingEDP in s]
+ [missingEDP.endswith('-2') for missingEDP in s] # noqa: C419
):
msg = 'ground motion dimension does not match with that of the training'
# for i in range(len(s)):
@@ -478,11 +478,11 @@ def get_stochastic_variance(X, Y, x, ny):
nrv = len(id_vec)
if nrv != nrv_sur:
# missing_ids = set([i for i in range(len(rv_name_sur))]) - set(id_vec)
- missing_ids = set([i for i in range(len(rv_name_sur))]).difference(
+ missing_ids = set([i for i in range(len(rv_name_sur))]).difference( # noqa: C403, C416
set(id_vec)
)
# print(missing_ids)
- s = [str(rv_name_sur[id]) for id in missing_ids]
+ s = [str(rv_name_sur[id]) for id in missing_ids] # noqa: A001
            msg = 'Error in Surrogate prediction: Number of dimensions inconsistent: Please define '
msg += ', '.join(s)
msg += ' at RV tab'
@@ -508,16 +508,16 @@ def get_stochastic_variance(X, Y, x, ny):
kr = kr + GPy.kern.Linear(input_dim=nrv_sur, ARD=True)
if did_logtransform:
- Y = np.log(Y)
+ Y = np.log(Y) # noqa: N806
kg = kr
- m_list = list()
+ m_list = list() # noqa: C408
nugget_var_list = [0] * ng_sur
if not did_mf:
for ny in range(ng_sur):
if did_stochastic[ny]:
- m_list = m_list + [
+ m_list = m_list + [ # noqa: RUF005
GPy.models.GPRegression(
X,
Y[:, ny][np.newaxis].transpose(),
@@ -526,17 +526,17 @@ def get_stochastic_variance(X, Y, x, ny):
)
]
(
- X_unique,
- Y_mean,
+ X_unique, # noqa: N806
+ Y_mean, # noqa: N806
norm_var_str,
counts,
nugget_var_pred,
- Y_normFact,
+ Y_normFact, # noqa: N806
) = get_stochastic_variance(X, Y[:, ny][np.newaxis].T, rv_val, ny)
- Y_metadata = {'variance_structure': norm_var_str / counts}
+ Y_metadata = {'variance_structure': norm_var_str / counts} # noqa: N806
m_list[ny].set_XY2(X_unique, Y_mean, Y_metadata=Y_metadata)
- for key, val in sur['modelInfo'][g_name_sur[ny]].items():
- exec('m_list[ny].' + key + '= np.array(val)')
+ for key, val in sur['modelInfo'][g_name_sur[ny]].items(): # noqa: B007, PERF102
+ exec('m_list[ny].' + key + '= np.array(val)') # noqa: S102
nugget_var_list[ny] = (
m_list[ny].Gaussian_noise.parameters
@@ -545,7 +545,7 @@ def get_stochastic_variance(X, Y, x, ny):
)
else:
- m_list = m_list + [
+ m_list = m_list + [ # noqa: RUF005
GPy.models.GPRegression(
X,
Y[:, ny][np.newaxis].transpose(),
@@ -553,21 +553,21 @@ def get_stochastic_variance(X, Y, x, ny):
normalizer=True,
)
]
- for key, val in sur['modelInfo'][g_name_sur[ny]].items():
- exec('m_list[ny].' + key + '= np.array(val)')
+ for key, val in sur['modelInfo'][g_name_sur[ny]].items(): # noqa: B007, PERF102
+ exec('m_list[ny].' + key + '= np.array(val)') # noqa: S102
- Y_normFact = np.var(Y[:, ny])
+ Y_normFact = np.var(Y[:, ny]) # noqa: N806
nugget_var_list[ny] = np.squeeze(
np.array(m_list[ny].Gaussian_noise.parameters)
* np.array(Y_normFact)
)
else:
- with open(surrogate_dir, 'rb') as file:
- m_list = pickle.load(file)
+ with open(surrogate_dir, 'rb') as file: # noqa: PTH123
+ m_list = pickle.load(file) # noqa: S301
for ny in range(ng_sur):
- Y_normFact = np.var(Y[:, ny])
+ Y_normFact = np.var(Y[:, ny]) # noqa: N806
nugget_var_list[ny] = (
m_list[ny].gpy_model['mixed_noise.Gaussian_noise.variance']
* Y_normFact
@@ -725,7 +725,7 @@ def get_stochastic_variance(X, Y, x, ny):
if np.isnan(y_pred_var[:, ny]).any():
y_pred_var[:, ny] = np.nan_to_num(y_pred_var[:, ny])
if np.isnan(y_pred_var_m[:, ny]).any():
- y_pred_m_var[:, ny] = np.nan_to_num(y_pred_m_var[:, ny])
+ y_pred_m_var[:, ny] = np.nan_to_num(y_pred_m_var[:, ny]) # noqa: F821
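The F821 suppression marks `y_pred_m_var` as undefined; the guard one line up tests `y_pred_var_m`, so this reads as a transposed name. A hypothetical corrected form, not confirmed against the upstream code:

```python
# Assumption: the flagged name is a transposition of y_pred_var_m.
import numpy as np

y_pred_var_m = np.array([[np.nan], [0.5]])
ny = 0
if np.isnan(y_pred_var_m[:, ny]).any():
    y_pred_var_m[:, ny] = np.nan_to_num(y_pred_var_m[:, ny])
print(y_pred_var_m[:, ny])  # -> [0.  0.5]
```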
# for parname in m_list[ny].parameter_names():
# if (kern_name in parname) and parname.endswith('variance'):
@@ -764,43 +764,43 @@ def get_stochastic_variance(X, Y, x, ny):
#
# (1) create "workdir.idx " folder :need C++17 to use the files system namespace
#
- templatedirFolder = os.path.join(os.getcwd(), 'templatedir_SIM')
+ templatedirFolder = os.path.join(os.getcwd(), 'templatedir_SIM') # noqa: PTH109, PTH118, N806
if (
isEEUQ and nsamp == 1
    ):  # because stochastic ground motion generation uses the folder number when generating the random seed
- current_dir_i = os.path.join(
- os.getcwd(),
+ current_dir_i = os.path.join( # noqa: PTH118
+ os.getcwd(), # noqa: PTH109
f'subworkdir.{sampNum}',
)
else:
- current_dir_i = os.path.join(os.getcwd(), f'subworkdir.{1 + ns}')
+ current_dir_i = os.path.join(os.getcwd(), f'subworkdir.{1 + ns}') # noqa: PTH109, PTH118
try:
shutil.copytree(templatedirFolder, current_dir_i)
- except Exception:
+ except Exception: # noqa: BLE001
try:
shutil.copytree(templatedirFolder, current_dir_i)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
msg = 'Error running FEM: ' + str(ex)
# change directory, create params.in
if isEEUQ:
shutil.copyfile(
- os.path.join(os.getcwd(), 'params.in'),
- os.path.join(current_dir_i, 'params.in'),
+ os.path.join(os.getcwd(), 'params.in'), # noqa: PTH109, PTH118
+ os.path.join(current_dir_i, 'params.in'), # noqa: PTH118
)
shutil.copyfile(
- os.path.join(os.getcwd(), 'EVENT.json.sc'),
- os.path.join(current_dir_i, 'EVENT.json.sc'),
+ os.path.join(os.getcwd(), 'EVENT.json.sc'), # noqa: PTH109, PTH118
+ os.path.join(current_dir_i, 'EVENT.json.sc'), # noqa: PTH118
)
#
# Replace parts of AIM
#
- with open(os.path.join(current_dir_i, 'AIM.json.sc')) as f:
+ with open(os.path.join(current_dir_i, 'AIM.json.sc')) as f: # noqa: PTH118, PTH123
try:
- AIMsc = json.load(f)
+ AIMsc = json.load(f) # noqa: N806
except ValueError:
                msg = 'invalid AIM in template. Simulation of original model cannot be performed'
error_exit(msg)
@@ -808,7 +808,7 @@ def get_stochastic_variance(X, Y, x, ny):
AIMsc['Applications']['Events'] = inp_tmp['Applications'][
'Events'
]
- with open(os.path.join(current_dir_i, 'AIM.json.sc'), 'w') as f:
+ with open(os.path.join(current_dir_i, 'AIM.json.sc'), 'w') as f: # noqa: PTH118, PTH123
json.dump(AIMsc, f, indent=2)
#
@@ -816,15 +816,15 @@ def get_stochastic_variance(X, Y, x, ny):
#
for fname in os.listdir(current_dir_i):
if fname.startswith('PEER-Record-'):
- os.remove(os.path.join(current_dir_i, fname))
+ os.remove(os.path.join(current_dir_i, fname)) # noqa: PTH107, PTH118
if fname.startswith('RSN') and fname.endswith('AT2'):
- os.remove(os.path.join(current_dir_i, fname))
+ os.remove(os.path.join(current_dir_i, fname)) # noqa: PTH107, PTH118
- for fname in os.listdir(os.getcwd()):
+ for fname in os.listdir(os.getcwd()): # noqa: PTH109
if fname.startswith('PEER-Record-'):
shutil.copyfile(
- os.path.join(os.getcwd(), fname),
- os.path.join(current_dir_i, fname),
+ os.path.join(os.getcwd(), fname), # noqa: PTH109, PTH118
+ os.path.join(current_dir_i, fname), # noqa: PTH118
)
#
@@ -836,10 +836,10 @@ def get_stochastic_variance(X, Y, x, ny):
else:
driver_name = 'driver'
- with open(os.path.join(os.getcwd(), driver_name)) as f:
+ with open(os.path.join(os.getcwd(), driver_name)) as f: # noqa: PTH109, PTH118, PTH123
event_driver = f.readline()
- with open(os.path.join(current_dir_i, driver_name), 'r+') as f:
+ with open(os.path.join(current_dir_i, driver_name), 'r+') as f: # noqa: PTH118, PTH123
# Read the original contents of the file
contents = f.readlines()
# Modify the first line
@@ -851,7 +851,7 @@ def get_stochastic_variance(X, Y, x, ny):
f.writelines(contents)
else:
- outF = open(current_dir_i + '/params.in', 'w')
+ outF = open(current_dir_i + '/params.in', 'w') # noqa: SIM115, PTH123, N806
outF.write(f'{nrv}\n')
for i in range(nrv):
outF.write(f'{rv_name_sur[i]} {rv_val[ns, i]}\n')
@@ -866,24 +866,24 @@ def get_stochastic_variance(X, Y, x, ny):
os_type.lower().startswith('win')
and run_type.lower() == 'runninglocal'
):
- workflowDriver = 'sc_driver.bat'
+ workflowDriver = 'sc_driver.bat' # noqa: N806
else:
- workflowDriver = 'sc_driver'
+ workflowDriver = 'sc_driver' # noqa: N806
elif (
os_type.lower().startswith('win')
and run_type.lower() == 'runninglocal'
):
- workflowDriver = 'driver.bat'
+ workflowDriver = 'driver.bat' # noqa: N806
else:
- workflowDriver = 'driver'
+ workflowDriver = 'driver' # noqa: N806
workflow_run_command = f'{current_dir_i}/{workflowDriver}'
- subprocess.Popen(workflow_run_command, shell=True).wait()
+ subprocess.Popen(workflow_run_command, shell=True).wait() # noqa: S602
# back to directory, copy result.out
# shutil.copyfile(os.path.join(sim_dir, 'results.out'), os.path.join(os.getcwd(), 'results.out'))
- with open('results.out') as f:
+ with open('results.out') as f: # noqa: PTH123
y_pred = np.array([np.loadtxt(f)]).flatten()
y_pred_subset[ns, :] = y_pred[g_idx]
@@ -946,7 +946,7 @@ def get_stochastic_variance(X, Y, x, ny):
g_name_subset = [g_name_sur[i] for i in g_idx]
if int(sampNum) == 1:
- with open('../surrogateTabHeader.out', 'w') as header_file:
+ with open('../surrogateTabHeader.out', 'w') as header_file: # noqa: PTH123
# write header
# if os.path.getsize('../surrogateTab.out') == 0:
header_file.write(
@@ -972,7 +972,7 @@ def get_stochastic_variance(X, Y, x, ny):
)
# write values
- with open('../surrogateTab.out', 'a') as tab_file:
+ with open('../surrogateTab.out', 'a') as tab_file: # noqa: PTH123
# write header
# if os.path.getsize('../surrogateTab.out') == 0:
# tab_file.write("%eval_id interface "+ " ".join(rv_name_sur) + " "+ " ".join(g_name_subset) + " " + ".median ".join(g_name_subset) + ".median "+ ".q5 ".join(g_name_subset) + ".q5 "+ ".q95 ".join(g_name_subset) + ".q95 " +".var ".join(g_name_subset) + ".var " + ".q5_w_mnoise ".join(g_name_subset) + ".q5_w_mnoise "+ ".q95_w_mnoise ".join(g_name_subset) + ".q95_w_mnoise " +".var_w_mnoise ".join(g_name_subset) + ".var_w_mnoise \n")
@@ -982,11 +982,11 @@ def get_stochastic_variance(X, Y, x, ny):
rv_list = ' '.join(f'{rv:e}' for rv in rv_val[ns, :])
ypred_list = ' '.join(f'{yp:e}' for yp in y_pred_subset[ns, :])
ymedian_list = ' '.join(f'{ym:e}' for ym in y_pred_median_subset[ns, :])
- yQ1_list = ' '.join(f'{yq1:e}' for yq1 in y_q1_subset[ns, :])
- yQ3_list = ' '.join(f'{yq3:e}' for yq3 in y_q3_subset[ns, :])
+ yQ1_list = ' '.join(f'{yq1:e}' for yq1 in y_q1_subset[ns, :]) # noqa: N806
+ yQ3_list = ' '.join(f'{yq3:e}' for yq3 in y_q3_subset[ns, :]) # noqa: N806
ypredvar_list = ' '.join(f'{ypv:e}' for ypv in y_pred_var_subset[ns, :])
- yQ1m_list = ' '.join(f'{yq1:e}' for yq1 in y_q1m_subset[ns, :])
- yQ3m_list = ' '.join(f'{yq3:e}' for yq3 in y_q3m_subset[ns, :])
+ yQ1m_list = ' '.join(f'{yq1:e}' for yq1 in y_q1m_subset[ns, :]) # noqa: N806
+ yQ3m_list = ' '.join(f'{yq3:e}' for yq3 in y_q3m_subset[ns, :]) # noqa: N806
ypredvarm_list = ' '.join(
f'{ypv:e}' for ypv in y_pred_var_m_subset[ns, :]
)
@@ -1018,25 +1018,25 @@ def get_stochastic_variance(X, Y, x, ny):
file_object.close()
-def predict(m, X, did_mf):
+def predict(m, X, did_mf): # noqa: N803, D103
if not did_mf:
return m.predict_noiseless(X)
- else:
- # TODO change below to noiseless
- X_list = convert_x_list_to_array([X, X])
- X_list_l = X_list[: X.shape[0]]
- X_list_h = X_list[X.shape[0] :]
+ else: # noqa: RET505
+ # TODO change below to noiseless # noqa: TD002, TD004
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806
+ X_list_l = X_list[: X.shape[0]] # noqa: N806, F841
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
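In the multi-fidelity branch, emukit's `convert_x_list_to_array` stacks the per-fidelity input blocks and appends a fidelity-index column, and only the high-fidelity half is handed to `m.predict`. A plain-numpy sketch of that transformation as I understand the helper's two-fidelity layout:

```python
import numpy as np

X = np.array([[0.1, 0.2], [0.3, 0.4]])
X_list = np.vstack([
    np.hstack([X, np.zeros((len(X), 1))]),  # fidelity 0 (low)
    np.hstack([X, np.ones((len(X), 1))]),   # fidelity 1 (high)
])
X_list_h = X_list[X.shape[0]:]              # rows handed to m.predict
print(X_list_h)
```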
if __name__ == '__main__':
- error_file = open('../surrogate.err', 'w')
- inputArgs = sys.argv
+ error_file = open('../surrogate.err', 'w') # noqa: SIM115, PTH123
+ inputArgs = sys.argv # noqa: N816
if not inputArgs[2].endswith('.json'):
msg = 'ERROR: surrogate information file (.json) not set'
error_file.write(msg)
- exit(-1)
+ exit(-1) # noqa: PLR1722
# elif not inputArgs[3].endswith('.pkl'):
# msg = 'ERROR: surrogate model file (.pkl) not set'
@@ -1073,7 +1073,7 @@ def predict(m, X, did_mf):
surrogate_meta_dir = inputArgs[2]
input_json = inputArgs[3] # scInput.json
- if len(inputArgs) > 4:
+ if len(inputArgs) > 4: # noqa: PLR2004
surrogate_dir = inputArgs[4]
else:
surrogate_dir = 'dummy' # not used
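S602 flags the `shell=True` driver launch above. A sketch of the list-of-arguments form the rule points to, with a stand-in command since the real call runs the workflow-specific `f'{current_dir_i}/{workflowDriver}'`:

```python
import subprocess
import sys

result = subprocess.run(
    [sys.executable, '-c', 'print("driver stand-in ran")'],  # list form,
    check=False,                                             # no shell
)
print(result.returncode)  # -> 0
```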
diff --git a/modules/performHUA/INCORECensusUtil.py b/modules/performHUA/INCORECensusUtil.py
index 7af2a41c4..2b2da9189 100644
--- a/modules/performHUA/INCORECensusUtil.py
+++ b/modules/performHUA/INCORECensusUtil.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
@@ -44,48 +44,48 @@
import sys
if __name__ == '__main__':
- print('Pulling census data')
+ print('Pulling census data') # noqa: T201
# Get any missing dependencies
- packageInstalled = False
+ packageInstalled = False # noqa: N816
import requests
if not hasattr(requests, 'get'):
- print('Installing the requests package')
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'requests'])
- packageInstalled = True
+ print('Installing the requests package') # noqa: T201
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'requests']) # noqa: S603
+ packageInstalled = True # noqa: N816
packages = ['geopandas']
for p in packages:
if importlib.util.find_spec(p) is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', p])
- packageInstalled = True
- print('Installing the ' + p + ' package')
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', p]) # noqa: S603
+ packageInstalled = True # noqa: N816
+ print('Installing the ' + p + ' package') # noqa: T201
- if packageInstalled == True:
- print('New packages were installed. Please restart the process.')
+ if packageInstalled == True: # noqa: E712
+ print('New packages were installed. Please restart the process.') # noqa: T201
sys.exit(0)
parser = argparse.ArgumentParser()
parser.add_argument('--census_config')
args = parser.parse_args()
- with open(args.census_config) as f:
+ with open(args.census_config) as f: # noqa: PTH123
config_info = json.load(f)
# Output directory
output_dir = config_info['OutputDirectory']
try:
- os.mkdir(f'{output_dir}')
- except:
- print('Output folder already exists.')
+ os.mkdir(f'{output_dir}') # noqa: PTH102
+ except: # noqa: E722
+ print('Output folder already exists.') # noqa: T201
# State counties, e.g., ['01001', '01003']
state_counties = config_info['CountiesArray']
# Population demographics vintage, e.g., "2010"
- popDemoVintage = config_info['PopulationDemographicsVintage']
+ popDemoVintage = config_info['PopulationDemographicsVintage'] # noqa: N816
# Custom census vars
census_vars = config_info['CensusVariablesArray']
@@ -94,11 +94,11 @@
acs_vars = config_info['ACSVariablesArray']
if (
- popDemoVintage != '2000'
+ popDemoVintage != '2000' # noqa: PLR1714
and popDemoVintage != '2010'
and popDemoVintage != '2020'
):
- print(
+ print( # noqa: T201
'Only 2000, 2010, and 2020 decennial census data supported. The provided vintage ',
popDemoVintage,
' is not supported',
@@ -107,14 +107,14 @@
sys.exit(-1)
# Vintage for household demographics
- houseIncomeVintage = config_info['HouseholdIncomeVintage']
+ houseIncomeVintage = config_info['HouseholdIncomeVintage'] # noqa: N816
if (
- houseIncomeVintage != '2010'
+ houseIncomeVintage != '2010' # noqa: PLR1714
and houseIncomeVintage != '2015'
and houseIncomeVintage != '2020'
):
- print(
+ print( # noqa: T201
'Only 2010, 2015, and 2020 ACS 5-yr data supported. The provided vintage ',
houseIncomeVintage,
' is not supported',
@@ -138,7 +138,7 @@
# sys.exit(0)
- print('Done pulling census population demographics data')
+ print('Done pulling census population demographics data') # noqa: T201
# Get the household income at the tract (2010 ACS) or block group level (2015 and 2020 ACS)
CensusUtil.get_blockgroupdata_for_income(
@@ -153,6 +153,6 @@
output_dir=output_dir,
)
- print('Done pulling ACS household income data')
+ print('Done pulling ACS household income data') # noqa: T201
sys.exit(0)
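The dependency probe above keys off `importlib.util.find_spec`, which reports whether a package is importable without importing it. A minimal standalone sketch:

```python
# find_spec returns None for absent packages without importing them.
import importlib.util

for p in ['geopandas', 'json']:
    found = importlib.util.find_spec(p) is not None
    print(f'{p}: {"installed" if found else "missing"}')
```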
diff --git a/modules/performHUA/pyincore_data/censusutil.py b/modules/performHUA/pyincore_data/censusutil.py
index 062d0374f..8b8bc9182 100644
--- a/modules/performHUA/pyincore_data/censusutil.py
+++ b/modules/performHUA/pyincore_data/censusutil.py
@@ -1,4 +1,4 @@
-# Based on the IN-CORE censusutil method
+# Based on the IN-CORE censusutil method # noqa: INP001, D100
# Modified by Dr. Stevan Gavrilovic, UC Berkeley, SimCenter
@@ -22,17 +22,17 @@
class CensusUtil:
- """Utility methods for Census data and API"""
+ """Utility methods for Census data and API""" # noqa: D400
@staticmethod
def generate_census_api_url(
- state: str = None,
- county: str = None,
- year: str = None,
- data_source: str = None,
- columns: str = None,
- geo_type: str = None,
- data_name: str = None,
+ state: str = None, # noqa: RUF013
+ county: str = None, # noqa: RUF013
+ year: str = None, # noqa: RUF013
+ data_source: str = None, # noqa: RUF013
+ columns: str = None, # noqa: RUF013
+ geo_type: str = None, # noqa: RUF013
+ data_name: str = None, # noqa: RUF013
):
"""Create url string to access census data api.
@@ -56,13 +56,13 @@ def generate_census_api_url(
if state is None:
error_msg = 'State value must be provided.'
logger.error(error_msg)
- raise Exception(error_msg)
+ raise Exception(error_msg) # noqa: TRY002
if geo_type is not None:
if county is None:
error_msg = 'State and county value must be provided when geo_type is provided.'
logger.error(error_msg)
- raise Exception(error_msg)
+ raise Exception(error_msg) # noqa: TRY002
# Set up url for Census API
base_url = f'https://api.census.gov/data/{year}/{data_source}'
@@ -93,31 +93,31 @@ def request_census_api(data_url):
Returns:
dict, object: A json list and a dataframe for census api result
- """
+ """ # noqa: D400
# Obtain Census API JSON Data
- request_json = requests.get(data_url)
+ request_json = requests.get(data_url) # noqa: S113
- if request_json.status_code != 200:
+ if request_json.status_code != 200: # noqa: PLR2004
error_msg = 'Failed to download the data from Census API. Please check your parameters.'
# logger.error(error_msg)
- raise Exception(error_msg)
+ raise Exception(error_msg) # noqa: TRY002
# Convert the requested json into pandas dataframe
api_json = request_json.json()
api_df = pd.DataFrame(columns=api_json[0], data=api_json[1:])
- return api_df
+ return api_df # noqa: RET504
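S113 flags `requests.get` without a timeout, which can hang the pipeline on a stalled Census API call. A hedged sketch of the guarded request; the timeout value, the `RuntimeError` (rather than the bare `Exception` TRY002 complains about), and the placeholder URL are illustrative choices, not the module's actual behavior:

```python
import requests

def request_census_api(data_url: str):
    response = requests.get(data_url, timeout=30)  # S113: explicit timeout
    if response.status_code != 200:
        raise RuntimeError('Census API request failed; check parameters.')
    return response.json()

# usage (placeholder URL):
# rows = request_census_api('https://api.census.gov/data/2010/dec/sf1?...')
```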
@staticmethod
- def get_blockdata_for_demographics(
+ def get_blockdata_for_demographics( # noqa: C901
state_counties: list,
census_vars: list,
vintage: str = '2010',
- out_csv: bool = False,
- out_shapefile: bool = False,
- out_geopackage: bool = False,
- out_geojson: bool = False,
+ out_csv: bool = False, # noqa: FBT001, FBT002
+ out_shapefile: bool = False, # noqa: FBT001, FBT002
+ out_geopackage: bool = False, # noqa: FBT001, FBT002
+ out_geojson: bool = False, # noqa: FBT001, FBT002
file_name: str = 'file_name',
output_dir: str = 'output_dir',
):
@@ -135,7 +135,7 @@ def get_blockdata_for_demographics(
file_name (str): Name of the output files.
output_dir (str): Name of directory used to save output files.
- """
+ """ # noqa: D400
# ***********************
# Get the population data
# ***********************
@@ -146,7 +146,7 @@ def get_blockdata_for_demographics(
get_pop_vars = 'GEO_ID,NAME'
int_vars = census_vars
- if vintage == '2000' or vintage == '2010':
+ if vintage == '2000' or vintage == '2010': # noqa: PLR1714
dataset_name += '/sf1'
# If no variable parameters passed by the user, use the default for 2000 and 2010 vintage
@@ -190,21 +190,21 @@ def get_blockdata_for_demographics(
get_pop_vars += ',' + var
else:
- print('Only 2000, 2010, and 2020 decennial census supported')
+ print('Only 2000, 2010, and 2020 decennial census supported') # noqa: T201
return None
# Make directory to save output
- if not os.path.exists(output_dir):
- os.mkdir(output_dir)
+ if not os.path.exists(output_dir): # noqa: PTH110
+ os.mkdir(output_dir) # noqa: PTH102
# Make a directory to save downloaded shapefiles
shapefile_dir = Path(output_dir) / 'shapefiletemp'
- if not os.path.exists(shapefile_dir):
- os.mkdir(shapefile_dir)
+ if not os.path.exists(shapefile_dir): # noqa: PTH110
+ os.mkdir(shapefile_dir) # noqa: PTH102
# Set to hold the states - needed for 2020 census shapefile download
- stateSet = set()
+ stateSet = set() # noqa: N806
# loop through counties
appended_countydata = [] # start an empty container for the county data
@@ -212,8 +212,8 @@ def get_blockdata_for_demographics(
# deconcatenate state and county values
state = state_county[0:2]
county = state_county[2:5]
- logger.debug('State: ' + state)
- logger.debug('County: ' + county)
+ logger.debug('State: ' + state) # noqa: G003
+ logger.debug('County: ' + county) # noqa: G003
# Add the state to the set
stateSet.add(state)
@@ -223,7 +223,7 @@ def get_blockdata_for_demographics(
state, county, vintage, dataset_name, get_pop_vars, 'block:*'
)
- logger.info('Census API data from: ' + api_hyperlink)
+ logger.info('Census API data from: ' + api_hyperlink) # noqa: G003
# Obtain Census API JSON Data
apidf = CensusUtil.request_census_api(api_hyperlink)
@@ -255,9 +255,9 @@ def get_blockdata_for_demographics(
for var in int_vars:
cen_block[var] = cen_block[var].astype(int)
# cen_block[var] = pd.to_numeric(cen_block[var], errors='coerce').convert_dtypes()
- print(var + ' converted from object to integer')
+ print(var + ' converted from object to integer') # noqa: T201
- if (vintage == '2000' or vintage == '2010') and not census_vars:
+ if (vintage == '2000' or vintage == '2010') and not census_vars: # noqa: PLR1714
# Generate new variables
cen_block['pwhitebg'] = cen_block['P005003'] / cen_block['P005001'] * 100
cen_block['pblackbg'] = cen_block['P005004'] / cen_block['P005001'] * 100
@@ -310,7 +310,7 @@ def get_blockdata_for_demographics(
merge_id = 'GEOID' + vintage[2:4]
# Tigerline provides the blocks for each county, thus each county needs to be downloaded individually
- if vintage == '2000' or vintage == '2010':
+ if vintage == '2000' or vintage == '2010': # noqa: PLR1714
if vintage == '2000':
merge_id = 'BLKIDFP00'
@@ -327,7 +327,7 @@ def get_blockdata_for_demographics(
+ filename
+ '.zip'
)
- print(
+ print( # noqa: T201
(
'Downloading Census Block Shapefiles for State_County: '
+ state_county
@@ -336,19 +336,19 @@ def get_blockdata_for_demographics(
).format(filename=filename)
)
- zip_file = os.path.join(shapefile_dir, filename + '.zip')
- urllib.request.urlretrieve(shapefile_url, zip_file)
+ zip_file = os.path.join(shapefile_dir, filename + '.zip') # noqa: PTH118
+ urllib.request.urlretrieve(shapefile_url, zip_file) # noqa: S310
with ZipFile(zip_file, 'r') as zip_obj:
zip_obj.extractall(path=shapefile_dir)
# Delete the zip file
- os.remove(zip_file)
+ os.remove(zip_file) # noqa: PTH107
- if Path(zip_file).is_file() == True:
- print('Error deleting the zip file ', zip_file)
+ if Path(zip_file).is_file() == True: # noqa: E712
+ print('Error deleting the zip file ', zip_file) # noqa: T201
- print('filename', f'{filename}.shp')
+ print('filename', f'{filename}.shp') # noqa: T201
# Read shapefile to GeoDataFrame
gdf = gpd.read_file(f'{shapefile_dir}/{filename}.shp')
@@ -368,7 +368,7 @@ def get_blockdata_for_demographics(
path = Path(f'{shapefile_dir}/{filename}.shp')
# if file does not exist
- if path.is_file() == False:
+ if path.is_file() == False: # noqa: E712
# Use wget to download the TIGER Shapefile for a county
# options -quiet = turn off wget output
# add directory prefix to save files to folder named after program name
@@ -378,7 +378,7 @@ def get_blockdata_for_demographics(
+ '.zip'
)
- print(
+ print( # noqa: T201
(
'Downloading Census Block Shapefiles for State: '
+ state
@@ -387,20 +387,20 @@ def get_blockdata_for_demographics(
).format(filename=filename)
)
- zip_file = os.path.join(shapefile_dir, filename + '.zip')
- urllib.request.urlretrieve(shapefile_url, zip_file)
+ zip_file = os.path.join(shapefile_dir, filename + '.zip') # noqa: PTH118
+ urllib.request.urlretrieve(shapefile_url, zip_file) # noqa: S310
with ZipFile(zip_file, 'r') as zip_obj:
zip_obj.extractall(path=shapefile_dir)
# Delete the zip file
- os.remove(zip_file)
+ os.remove(zip_file) # noqa: PTH107
- if Path(zip_file).is_file() == True:
- print('Error deleting the zip file ', zip_file)
+ if Path(zip_file).is_file() == True: # noqa: E712
+ print('Error deleting the zip file ', zip_file) # noqa: T201
else:
- print(f'Found file {filename}.shp in cache')
+ print(f'Found file {filename}.shp in cache') # noqa: T201
# Read shapefile to GeoDataFrame
gdf = gpd.read_file(f'{shapefile_dir}/{filename}.shp')
@@ -414,12 +414,12 @@ def get_blockdata_for_demographics(
# Create dataframe from appended block files
shp_block = pd.concat(appended_shp_files)
- print(
+ print( # noqa: T201
'Merging the census population demographics information to the shapefile'
)
# Clean Data - Merge Census demographic data to the appended shapefiles
- cen_shp_block_merged = pd.merge(
+ cen_shp_block_merged = pd.merge( # noqa: PD015
shp_block, cen_block, left_on=merge_id, right_on='blockid', how='left'
)
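PD015 prefers the `.merge` method over the top-level `pd.merge`. The same join as a method call, with toy frames standing in for the shapefile and census block data:

```python
import pandas as pd

shp_block = pd.DataFrame({'GEOID10': ['010010201001000'], 'geom': ['...']})
cen_block = pd.DataFrame({'blockid': ['010010201001000'], 'pop': [42]})

merged = shp_block.merge(           # PD015: method form of pd.merge
    cen_block, left_on='GEOID10', right_on='blockid', how='left'
)
print(merged[['GEOID10', 'pop']])
```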
@@ -469,19 +469,19 @@ def get_blockdata_for_demographics(
# logger.error(error_msg)
# raise Exception(error_msg)
- print('Done creating population demographics shapefile')
+ print('Done creating population demographics shapefile') # noqa: T201
return cen_block[save_columns]
@staticmethod
- def get_blockgroupdata_for_income(
+ def get_blockgroupdata_for_income( # noqa: C901
state_counties: list,
acs_vars: list,
vintage: str = '2010',
- out_csv: bool = False,
- out_shapefile: bool = False,
- out_geopackage: bool = False,
- out_geojson: bool = False,
+ out_csv: bool = False, # noqa: FBT001, FBT002
+ out_shapefile: bool = False, # noqa: FBT001, FBT002
+ out_geopackage: bool = False, # noqa: FBT001, FBT002
+ out_geojson: bool = False, # noqa: FBT001, FBT002
file_name: str = 'file_name',
output_dir: str = 'output_dir',
):
@@ -499,7 +499,7 @@ def get_blockgroupdata_for_income(
file_name (str): Name of the output files.
output_dir (str): Name of directory used to save output files.
- """
+ """ # noqa: D400
# dataset_name (str): ACS dataset name.
dataset_name = 'acs/acs5'
@@ -564,17 +564,17 @@ def get_blockgroupdata_for_income(
get_income_vars += ',' + var
# Make directory to save output
- if not os.path.exists(output_dir):
- os.mkdir(output_dir)
+ if not os.path.exists(output_dir): # noqa: PTH110
+ os.mkdir(output_dir) # noqa: PTH102
# Make a directory to save downloaded shapefiles
shapefile_dir = Path(output_dir) / 'shapefiletemp'
- if not os.path.exists(shapefile_dir):
- os.mkdir(shapefile_dir)
+ if not os.path.exists(shapefile_dir): # noqa: PTH110
+ os.mkdir(shapefile_dir) # noqa: PTH102
# Set to hold the states - needed for 2020 census shapefile download
- stateSet = set()
+ stateSet = set() # noqa: N806
# loop through counties
appended_countydata = [] # start an empty container for the county data
@@ -582,8 +582,8 @@ def get_blockgroupdata_for_income(
# deconcatenate state and county values
state = state_county[0:2]
county = state_county[2:5]
- logger.debug('State: ' + state)
- logger.debug('County: ' + county)
+ logger.debug('State: ' + state) # noqa: G003
+ logger.debug('County: ' + county) # noqa: G003
# Add the state to the set
stateSet.add(state)
@@ -606,7 +606,7 @@ def get_blockgroupdata_for_income(
'block%20group',
)
- logger.info('Census API data from: ' + api_hyperlink)
+ logger.info('Census API data from: ' + api_hyperlink) # noqa: G003
# Obtain Census API JSON Data
apidf = CensusUtil.request_census_api(api_hyperlink)
@@ -655,7 +655,7 @@ def get_blockgroupdata_for_income(
cen_blockgroup[var], errors='coerce'
).convert_dtypes()
# cen_blockgroup[var] = cen_blockgroup[var].astype(int)
- print(var + ' converted from object to integer')
+ print(var + ' converted from object to integer') # noqa: T201
# ### Obtain Data - Download and extract shapefiles
# The Block Group IDs in the Census data are associated with the Block Group boundaries that can be mapped.
@@ -714,7 +714,7 @@ def get_blockgroupdata_for_income(
+ '.zip'
)
- print(
+ print( # noqa: T201
(
'Downloading Census Block Shapefiles for State_County: '
+ state_county
@@ -723,17 +723,17 @@ def get_blockgroupdata_for_income(
).format(filename=filename)
)
- zip_file = os.path.join(shapefile_dir, filename + '.zip')
- urllib.request.urlretrieve(shapefile_url, zip_file)
+ zip_file = os.path.join(shapefile_dir, filename + '.zip') # noqa: PTH118
+ urllib.request.urlretrieve(shapefile_url, zip_file) # noqa: S310
with ZipFile(zip_file, 'r') as zip_obj:
zip_obj.extractall(path=shapefile_dir)
# Delete the zip file
- os.remove(zip_file)
+ os.remove(zip_file) # noqa: PTH107
- if Path(zip_file).is_file() == True:
- print('Error deleting the zip file ', zip_file)
+ if Path(zip_file).is_file() == True: # noqa: E712
+ print('Error deleting the zip file ', zip_file) # noqa: T201
# Read shapefile to GeoDataFrame
gdf = gpd.read_file(f'{shapefile_dir}/{filename}.shp')
@@ -744,7 +744,7 @@ def get_blockgroupdata_for_income(
# Append county data
appended_shp_files.append(gdf)
- elif vintage == '2015' or vintage == '2020':
+ elif vintage == '2015' or vintage == '2020': # noqa: PLR1714
merge_id_right = 'bgid'
# loop through the states
@@ -755,7 +755,7 @@ def get_blockgroupdata_for_income(
path = Path(f'{shapefile_dir}/{filename}.shp')
# if file does not exist
- if path.is_file() == False:
+ if path.is_file() == False: # noqa: E712
            # Download the TIGER shapefile for the state via urllib
            # and cache it under the shapefiletemp directory
@@ -765,7 +765,7 @@ def get_blockgroupdata_for_income(
+ '.zip'
)
- print(
+ print( # noqa: T201
(
'Downloading Census Block Shapefiles for State: '
+ state
@@ -774,20 +774,20 @@ def get_blockgroupdata_for_income(
).format(filename=filename)
)
- zip_file = os.path.join(shapefile_dir, filename + '.zip')
- urllib.request.urlretrieve(shapefile_url, zip_file)
+ zip_file = os.path.join(shapefile_dir, filename + '.zip') # noqa: PTH118
+ urllib.request.urlretrieve(shapefile_url, zip_file) # noqa: S310
with ZipFile(zip_file, 'r') as zip_obj:
zip_obj.extractall(path=shapefile_dir)
# Delete the zip file
- os.remove(zip_file)
+ os.remove(zip_file) # noqa: PTH107
- if Path(zip_file).is_file() == True:
- print('Error deleting the zip file ', zip_file)
+ if Path(zip_file).is_file() == True: # noqa: E712
+ print('Error deleting the zip file ', zip_file) # noqa: T201
else:
- print(f'Found file {filename}.shp in cache: ', path)
+ print(f'Found file {filename}.shp in cache: ', path) # noqa: T201
# Read shapefile to GeoDataFrame
gdf = gpd.read_file(f'{shapefile_dir}/{filename}.shp')
@@ -801,10 +801,10 @@ def get_blockgroupdata_for_income(
# Create dataframe from appended county data
shp_blockgroup = pd.concat(appended_shp_files)
- print('Merging the ACS household income information to the shapefile')
+ print('Merging the ACS household income information to the shapefile') # noqa: T201
# Clean Data - Merge Census demographic data to the appended shapefiles
- cen_shp_blockgroup_merged = pd.merge(
+ cen_shp_blockgroup_merged = pd.merge( # noqa: PD015
shp_blockgroup,
cen_blockgroup,
left_on=merge_id_left,
@@ -858,7 +858,7 @@ def get_blockgroupdata_for_income(
# logger.error(error_msg)
# raise Exception(error_msg)
- print('Done creating household income shapefile')
+ print('Done creating household income shapefile') # noqa: T201
return cen_blockgroup[save_columns]
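The PD015 suppression above marks a module-level pd.merge call; ruff's suggested fix is the equivalent DataFrame.merge method, which reads the same and would need no suppression. A minimal sketch of the method form, using toy frames whose column names are hypothetical rather than the ones CensusUtil builds:

import pandas as pd

# stand-ins for the shapefile and census frames; the column names
# here are illustrative only
shp_blockgroup = pd.DataFrame({'GEOID': ['060010001001'], 'area': [1.0]})
cen_blockgroup = pd.DataFrame({'bgid': ['060010001001'], 'income': [85000]})

# method form of pd.merge, which is what PD015 points at
cen_shp_blockgroup_merged = shp_blockgroup.merge(
    cen_blockgroup, left_on='GEOID', right_on='bgid'
)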
@@ -874,7 +874,7 @@ def convert_dislocation_gpd_to_shapefile(in_gpd, programname, savefile):
"""
# save cen_shp_blockgroup_merged shapefile
- print(
+ print( # noqa: T201
'Shapefile data file saved to: ' + programname + '/' + savefile + '.shp'
)
in_gpd.to_file(programname + '/' + savefile + '.shp')
@@ -891,7 +891,7 @@ def convert_dislocation_gpd_to_geojson(in_gpd, programname, savefile):
"""
# save cen_shp_blockgroup_merged geojson
- print(
+ print( # noqa: T201
'Geodatabase data file saved to: '
+ programname
+ '/'
@@ -912,7 +912,7 @@ def convert_dislocation_gpd_to_geopackage(in_gpd, programname, savefile):
"""
# save cen_shp_blockgroup_merged shapefile
- print(
+ print( # noqa: T201
'GeoPackage data file saved to: '
+ programname
+ '/'
@@ -936,7 +936,7 @@ def convert_dislocation_pd_to_csv(in_pd, save_columns, programname, savefile):
"""
# Save cen_blockgroup dataframe with save_column variables to csv named savefile
- print('CSV data file saved to: ' + programname + '/' + savefile + '.csv')
+ print('CSV data file saved to: ' + programname + '/' + savefile + '.csv') # noqa: T201
in_pd[save_columns].to_csv(
programname + '/' + savefile + '.csv', index=False
)
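Several of the PTH suppressions above (PTH102, PTH107, PTH110, PTH118) mark os.path idioms with direct pathlib equivalents. A sketch of what the directory and zip-file handling could look like under pathlib, assuming the same output_dir/shapefiletemp layout; the zip filename is illustrative:

from pathlib import Path

output_dir = Path('output_dir')
shapefile_dir = output_dir / 'shapefiletemp'
# replaces the os.path.exists()/os.mkdir() pairs (PTH110/PTH102)
shapefile_dir.mkdir(parents=True, exist_ok=True)

# replaces os.path.join (PTH118)
zip_file = shapefile_dir / 'tl_2020_06_bg.zip'
# replaces os.remove (PTH107); missing_ok skips the existence pre-check
zip_file.unlink(missing_ok=True)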
diff --git a/modules/performHUA/pyincore_data/globals.py b/modules/performHUA/pyincore_data/globals.py
index 55fd1cb80..8e97ce3d7 100644
--- a/modules/performHUA/pyincore_data/globals.py
+++ b/modules/performHUA/pyincore_data/globals.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 University of Illinois and others. All rights reserved.
+# Copyright (c) 2021 University of Illinois and others. All rights reserved. # noqa: INP001, D100
#
# This program and the accompanying materials are made available under the
# terms of the Mozilla Public License v2.0 which accompanies this distribution,
@@ -10,10 +10,10 @@
PACKAGE_VERSION = '0.3.0'
-PYINCORE_DATA_ROOT_FOLDER = os.path.dirname(os.path.dirname(__file__))
+PYINCORE_DATA_ROOT_FOLDER = os.path.dirname(os.path.dirname(__file__)) # noqa: PTH120
-LOGGING_CONFIG = os.path.abspath(
- os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.ini')
+LOGGING_CONFIG = os.path.abspath( # noqa: PTH100
+ os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.ini') # noqa: PTH100, PTH118, PTH120
)
logging_config.fileConfig(LOGGING_CONFIG)
LOGGER = logging.getLogger('pyincore-data')
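The two suppressed constants in globals.py likewise have one-line pathlib forms; a sketch, assuming the same layout with logging.ini next to the module:

from pathlib import Path

# parent of the package directory, equivalent to the nested
# os.path.dirname calls (PTH120); resolve() also makes it absolute
PYINCORE_DATA_ROOT_FOLDER = Path(__file__).resolve().parent.parent
# logging.ini next to this module, replacing the abspath/join/dirname
# chain (PTH100/PTH118/PTH120)
LOGGING_CONFIG = Path(__file__).resolve().parent / 'logging.ini'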
diff --git a/modules/performREC/pyrecodes/run_pyrecodes.py b/modules/performREC/pyrecodes/run_pyrecodes.py
index 87cbf2123..c40a68d7c 100644
--- a/modules/performREC/pyrecodes/run_pyrecodes.py
+++ b/modules/performREC/pyrecodes/run_pyrecodes.py
@@ -1,178 +1,237 @@
-import json, os, shapely, argparse, sys, ujson, importlib
+import json, os, shapely, argparse, sys, ujson, importlib # noqa: INP001, I001, E401, D100
import geopandas as gpd
import numpy as np
import pandas as pd
from pathlib import Path
+
# Delete below when pyrecodes can be installed as a standalone package
-import sys
+import sys # noqa: F811
+
sys.path.insert(0, '/Users/jinyanzhao/Desktop/SimCenterBuild/r2d_pyrecodes/')
from pyrecodes import main
-def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC):
-
+def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC): # noqa: ARG001, C901, N803, D103
    # Create the recovery output directory
- rec_ouput_dir = os.path.join(inputRWHALE['runDir'],"Results", "Recovery")
- if not os.path.exists(rec_ouput_dir):
- os.mkdir(rec_ouput_dir)
+ rec_ouput_dir = os.path.join(inputRWHALE['runDir'], 'Results', 'Recovery') # noqa: PTH118
+ if not os.path.exists(rec_ouput_dir): # noqa: PTH110
+ os.mkdir(rec_ouput_dir) # noqa: PTH102
# Find the realizations to run
damage_input = rec_config.pop('DamageInput')
- realizations_to_run = select_realizations_to_run(\
- damage_input,inputRWHALE)
-
+ realizations_to_run = select_realizations_to_run(damage_input, inputRWHALE)
# Replace SimCenterDefault with correct path
- cmp_lib = rec_config["ComponentLibrary"]
+ cmp_lib = rec_config['ComponentLibrary']
if cmp_lib.startswith('SimCenterDefault'):
cmp_lib_name = cmp_lib.split('/')[1]
- cmp_lib_dir = os.path.dirname(os.path.realpath(__file__))
- cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name)
- rec_config["ComponentLibrary"] = cmp_lib
+ cmp_lib_dir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
+ cmp_lib = os.path.join(cmp_lib_dir, cmp_lib_name) # noqa: PTH118
+ rec_config['ComponentLibrary'] = cmp_lib
    # loop through each realization; needs to be parallelized
# Create the base of system configuration json
system_configuration = create_system_configuration(rec_config)
# Create the base of main json
- main_json = dict()
- main_json.update({"ComponentLibrary": {
- "ComponentLibraryCreatorClass": "JSONComponentLibraryCreator",
- "ComponentLibraryFile": rec_config["ComponentLibrary"]
- }})
+ main_json = dict() # noqa: C408
+ main_json.update(
+ {
+ 'ComponentLibrary': {
+ 'ComponentLibraryCreatorClass': 'JSONComponentLibraryCreator',
+ 'ComponentLibraryFile': rec_config['ComponentLibrary'],
+ }
+ }
+ )
# initialize a dict to accumulate recovery results stats
- result_det_path = os.path.join(inputRWHALE['runDir'],"Results",
- f"Results_det.json")
- with open(result_det_path, 'r') as f:
+ result_det_path = os.path.join( # noqa: PTH118
+ inputRWHALE['runDir'],
+ 'Results',
+ 'Results_det.json',
+ )
+ with open(result_det_path, 'r') as f: # noqa: PTH123, UP015
results_det = json.load(f)
- result_agg = dict()
- resilience_results = dict()
+ result_agg = dict() # noqa: C408
+ resilience_results = dict() # noqa: C408
# Loop through realizations and run pyrecodes
- numP = 1
- procID = 0
- doParallel = False
- mpi_spec = importlib.util.find_spec("mpi4py")
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ doParallel = False # noqa: N806
+ mpi_spec = importlib.util.find_spec('mpi4py')
found = mpi_spec is not None
if found and parallelType == 'parRUN':
import mpi4py
from mpi4py import MPI
+
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
else:
- doParallel = True
+ doParallel = True # noqa: N806
count = 0
- needsInitiation = True
+ needsInitiation = True # noqa: N806
ind_in_rank = 0
- for ind, rlz_ind in enumerate(realizations_to_run):
+ for ind, rlz_ind in enumerate(realizations_to_run): # noqa: B007
# Create a realization directory
if count % numP == procID:
- rlz_dir = os.path.join(rec_ouput_dir,str(rlz_ind))
- if not os.path.exists(rlz_dir):
- os.mkdir(rlz_dir)
+ rlz_dir = os.path.join(rec_ouput_dir, str(rlz_ind)) # noqa: PTH118
+ if not os.path.exists(rlz_dir): # noqa: PTH110
+ os.mkdir(rlz_dir) # noqa: PTH102
# Update the system_configuration json
- damage_rlz_file = os.path.join(inputRWHALE['runDir'],"Results",\
- f"Results_{int(rlz_ind)}.json")
- DamageInput = {"Type": "R2DDamageInput",
- "Parameters": {"DamageFile": damage_rlz_file}}
- system_configuration.update({"DamageInput":DamageInput})
+ damage_rlz_file = os.path.join( # noqa: PTH118
+ inputRWHALE['runDir'], 'Results', f'Results_{int(rlz_ind)}.json'
+ )
+ DamageInput = { # noqa: N806
+ 'Type': 'R2DDamageInput',
+ 'Parameters': {'DamageFile': damage_rlz_file},
+ }
+ system_configuration.update({'DamageInput': DamageInput})
            # Write the system_configuration to a file
- system_configuration_file = os.path.join(rlz_dir, \
- "SystemConfiguration.json")
- with open(system_configuration_file, 'w') as f:
+ system_configuration_file = os.path.join( # noqa: PTH118
+ rlz_dir, 'SystemConfiguration.json'
+ )
+ with open(system_configuration_file, 'w') as f: # noqa: PTH123
ujson.dump(system_configuration, f)
-
# Update the main json
- main_json.update({"System": {
- "SystemCreatorClass": "ConcreteSystemCreator",
- "SystemClass": "BuiltEnvironmentSystem",
- "SystemConfigurationFile": system_configuration_file
- }})
+ main_json.update(
+ {
+ 'System': {
+ 'SystemCreatorClass': 'ConcreteSystemCreator',
+ 'SystemClass': 'BuiltEnvironmentSystem',
+ 'SystemConfigurationFile': system_configuration_file,
+ }
+ }
+ )
# Write the main json to a file
- main_file = os.path.join(rlz_dir, "main.json")
- with open(main_file, 'w') as f:
+ main_file = os.path.join(rlz_dir, 'main.json') # noqa: PTH118
+ with open(main_file, 'w') as f: # noqa: PTH123
ujson.dump(main_json, f)
system = main.run(main_file)
system.calculate_resilience()
- # Append the recovery time to results_rlz
+ # Append the recovery time to results_rlz
if needsInitiation:
- needsInitiation = False
- num_of_rlz_per_rank = int(np.floor(len(realizations_to_run)/numP))
- if procID < len(realizations_to_run)%numP:
+ needsInitiation = False # noqa: N806
+ num_of_rlz_per_rank = int(np.floor(len(realizations_to_run) / numP))
+ if procID < len(realizations_to_run) % numP:
num_of_rlz_per_rank += 1
# Initialize resilience_results
- resilience_results_buffer = dict()
+ resilience_results_buffer = dict() # noqa: C408
resilience_calculator_id = 0
- resilience_results.update({
- "time_steps": list(range(0, system.MAX_TIME_STEP+1))
- })
- resources_to_plot = system.resilience_calculators[resilience_calculator_id].system_supply.keys()
- for resource_name in resources_to_plot:
- resilience_results_buffer.update({
- resource_name: {
- "Supply": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]),
- "Demand": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1]),
- "Consumption": np.zeros([num_of_rlz_per_rank, system.MAX_TIME_STEP+1])
+ resilience_results.update(
+ {
+ 'time_steps': list(range(0, system.MAX_TIME_STEP + 1)) # noqa: PIE808
+ }
+ )
+ resources_to_plot = system.resilience_calculators[
+ resilience_calculator_id
+ ].system_supply.keys()
+ for resource_name in resources_to_plot:
+ resilience_results_buffer.update(
+ {
+ resource_name: {
+ 'Supply': np.zeros(
+ [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1]
+ ),
+ 'Demand': np.zeros(
+ [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1]
+ ),
+ 'Consumption': np.zeros(
+ [num_of_rlz_per_rank, system.MAX_TIME_STEP + 1]
+ ),
+ }
}
- })
+ )
# Initialize result_agg
- result_agg_buffer = dict()
+ result_agg_buffer = dict() # noqa: C408
for asset_type, item in results_det.items():
- asset_type_result = dict()
+ asset_type_result = dict() # noqa: C408
for asset_subtype, asset_subtype_item in item.items():
- asset_subtype_result = dict()
- for aim_id, aim in asset_subtype_item.items():
- asset_subtype_result.update({aim_id:{
- "RecoveryDuration":np.zeros(num_of_rlz_per_rank)
- }})
- asset_type_result.update({asset_subtype:asset_subtype_result})
- result_agg_buffer.update({asset_type:asset_type_result})
+ asset_subtype_result = dict() # noqa: C408
+ for aim_id, aim in asset_subtype_item.items(): # noqa: B007
+ asset_subtype_result.update(
+ {
+ aim_id: {
+ 'RecoveryDuration': np.zeros(
+ num_of_rlz_per_rank
+ )
+ }
+ }
+ )
+ asset_type_result.update(
+ {asset_subtype: asset_subtype_result}
+ )
+ result_agg_buffer.update({asset_type: asset_type_result})
del results_det
-
- resilience_result_rlz_i = dict()
+ resilience_result_rlz_i = dict() # noqa: C408
for resource_name in resources_to_plot:
- resilience_result_rlz_i.update({
- "time_steps": list(range(0, system.time_step+1)),
+ resilience_result_rlz_i.update(
+ {
+ 'time_steps': list(range(0, system.time_step + 1)), # noqa: PIE808
resource_name: {
- "Supply": system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1],
- "Demand": system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1],
- "Consumption": system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1]
- }
+ 'Supply': system.resilience_calculators[
+ resilience_calculator_id
+ ].system_supply[resource_name][: system.time_step + 1],
+ 'Demand': system.resilience_calculators[
+ resilience_calculator_id
+ ].system_demand[resource_name][: system.time_step + 1],
+ 'Consumption': system.resilience_calculators[
+ resilience_calculator_id
+ ].system_consumption[resource_name][
+ : system.time_step + 1
+ ],
+ },
}
- )
- resilience_results_buffer[resource_name]['Supply'][ind_in_rank,:system.time_step+1] = \
- system.resilience_calculators[resilience_calculator_id].system_supply[resource_name][:system.time_step+1]
- resilience_results_buffer[resource_name]['Demand'][ind_in_rank,:system.time_step+1] = \
- system.resilience_calculators[resilience_calculator_id].system_demand[resource_name][:system.time_step+1]
- resilience_results_buffer[resource_name]['Consumption'][ind_in_rank,:system.time_step+1] = \
- system.resilience_calculators[resilience_calculator_id].system_consumption[resource_name][:system.time_step+1]
- resilience_result_rlz_i_file = os.path.join(rlz_dir, "ResilienceResult.json")
- with open(resilience_result_rlz_i_file, 'w') as f:
+ )
+ resilience_results_buffer[resource_name]['Supply'][
+ ind_in_rank, : system.time_step + 1
+ ] = system.resilience_calculators[
+ resilience_calculator_id
+ ].system_supply[resource_name][: system.time_step + 1]
+ resilience_results_buffer[resource_name]['Demand'][
+ ind_in_rank, : system.time_step + 1
+ ] = system.resilience_calculators[
+ resilience_calculator_id
+ ].system_demand[resource_name][: system.time_step + 1]
+ resilience_results_buffer[resource_name]['Consumption'][
+ ind_in_rank, : system.time_step + 1
+ ] = system.resilience_calculators[
+ resilience_calculator_id
+ ].system_consumption[resource_name][: system.time_step + 1]
+ resilience_result_rlz_i_file = os.path.join( # noqa: PTH118
+ rlz_dir, 'ResilienceResult.json'
+ )
+ with open(resilience_result_rlz_i_file, 'w') as f: # noqa: PTH123
ujson.dump(resilience_result_rlz_i, f)
- result_file_name = os.path.join(inputRWHALE['runDir'],"Results",
- f"Results_{rlz_ind}.json")
- with open(result_file_name, 'r') as f:
+ result_file_name = os.path.join( # noqa: PTH118
+ inputRWHALE['runDir'],
+ 'Results',
+ f'Results_{rlz_ind}.json',
+ )
+ with open(result_file_name, 'r') as f: # noqa: PTH123, UP015
results = json.load(f)
for comp in system.components:
if getattr(comp, 'r2d_comp', False) is True:
- recovery_duration = getattr(comp, 'recoverd_time_step',system.MAX_TIME_STEP) - \
- system.DISASTER_TIME_STEP
+ recovery_duration = (
+ getattr(comp, 'recoverd_time_step', system.MAX_TIME_STEP)
+ - system.DISASTER_TIME_STEP
+ )
recovery_duration = max(0, recovery_duration)
- results[comp.asset_type][comp.asset_subtype][comp.aim_id].update({
- "Recovery": {"Duration":recovery_duration}
- })
- result_agg_buffer[comp.asset_type][comp.asset_subtype][comp.aim_id]\
- ['RecoveryDuration'][ind_in_rank] = recovery_duration
- with open(result_file_name, 'w') as f:
+ results[comp.asset_type][comp.asset_subtype][comp.aim_id].update(
+ {'Recovery': {'Duration': recovery_duration}}
+ )
+ result_agg_buffer[comp.asset_type][comp.asset_subtype][
+ comp.aim_id
+ ]['RecoveryDuration'][ind_in_rank] = recovery_duration
+ with open(result_file_name, 'w') as f: # noqa: PTH123
ujson.dump(results, f)
ind_in_rank += 1
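The realization loop above hands out work round-robin: rank procID takes every realization whose running count satisfies count % numP == procID. A stripped-down sketch of that distribution pattern, assuming mpi4py is installed and using a placeholder workload:

from mpi4py import MPI

comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()

realizations_to_run = list(range(12))  # placeholder workload
for count, rlz_ind in enumerate(realizations_to_run):
    # each rank processes every num_procs-th realization
    if count % num_procs == rank:
        print(f'rank {rank} runs realization {rlz_ind}')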
@@ -188,127 +247,194 @@ def run_pyrecodes(rec_config, inputRWHALE, parallelType, mpiExec, numPROC):
if doParallel:
        # gather result_agg
for asset_type, item in result_agg_buffer.items():
- asset_type_result = dict()
+ asset_type_result = dict() # noqa: C408
for asset_subtype, asset_subtype_item in item.items():
- asset_subtype_result = dict()
- for aim_id, aim in asset_subtype_item.items():
- asset_subtype_result.update({aim_id:{
- "RecoveryDuration":comm.gather(result_agg_buffer[asset_type][asset_subtype], root=0)
- }})
- asset_type_result.update({asset_subtype:asset_subtype_result})
- result_agg.update({asset_type:asset_type_result})
+ asset_subtype_result = dict() # noqa: C408
+ for aim_id, aim in asset_subtype_item.items(): # noqa: B007
+ asset_subtype_result.update(
+ {
+ aim_id: {
+ 'RecoveryDuration': comm.gather(
+ result_agg_buffer[asset_type][asset_subtype],
+ root=0,
+ )
+ }
+ }
+ )
+ asset_type_result.update({asset_subtype: asset_subtype_result})
+ result_agg.update({asset_type: asset_type_result})
        # gather resilience_results
for resource_name in resources_to_plot:
if procID == 0:
- resilience_results.update({
- resource_name: {
- "Supply": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]),
- "Demand": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1]),
- "Consumption": np.zeros([len(realizations_to_run), system.MAX_TIME_STEP+1])
+ resilience_results.update(
+ {
+ resource_name: {
+ 'Supply': np.zeros(
+ [len(realizations_to_run), system.MAX_TIME_STEP + 1]
+ ),
+ 'Demand': np.zeros(
+ [len(realizations_to_run), system.MAX_TIME_STEP + 1]
+ ),
+ 'Consumption': np.zeros(
+ [len(realizations_to_run), system.MAX_TIME_STEP + 1]
+ ),
+ }
}
- })
- comm.gather(resilience_results_buffer[resource_name]["Supply"],
- resilience_results[resource_name]["Supply"], root=0)
- comm.gather(resilience_results_buffer[resource_name]["Demand"],
- resilience_results[resource_name]["Demand"], root=0)
- comm.gather(resilience_results_buffer[resource_name]["Consumption"],
- resilience_results[resource_name]["Consumption"], root=0)
+ )
+ comm.gather(
+ resilience_results_buffer[resource_name]['Supply'],
+ resilience_results[resource_name]['Supply'],
+ root=0,
+ )
+ comm.gather(
+ resilience_results_buffer[resource_name]['Demand'],
+ resilience_results[resource_name]['Demand'],
+ root=0,
+ )
+ comm.gather(
+ resilience_results_buffer[resource_name]['Consumption'],
+ resilience_results[resource_name]['Consumption'],
+ root=0,
+ )
else:
- for resource_name in resources_to_plot:
- resilience_results.update({
- resource_name: resilience_results_buffer[resource_name]
- })
+ for resource_name in resources_to_plot:
+ resilience_results.update(
+ {resource_name: resilience_results_buffer[resource_name]}
+ )
result_agg = result_agg_buffer
- if procID==0:
- # Calculate stats of the results and add to results_det.json
- with open(result_det_path, 'r') as f:
+ if procID == 0:
+ # Calculate stats of the results and add to results_det.json
+ with open(result_det_path, 'r') as f: # noqa: PTH123, UP015
results_det = json.load(f)
for asset_type, item in result_agg.items():
for asset_subtype, asset_subtype_item in item.items():
for aim_id, aim in asset_subtype_item.items():
- if 'R2Dres' not in results_det[asset_type][asset_subtype][aim_id].keys():
- results_det[asset_type][asset_subtype][aim_id].update({'R2Dres':{}})
- results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update({
- "R2Dres_mean_RecoveryDuration":aim['RecoveryDuration'].mean(),
- "R2Dres_std_RecoveryDuration":aim['RecoveryDuration'].std()
- })
- with open(result_det_path, 'w') as f:
+ if (
+ 'R2Dres' # noqa: SIM118
+ not in results_det[asset_type][asset_subtype][aim_id].keys()
+ ):
+ results_det[asset_type][asset_subtype][aim_id].update(
+ {'R2Dres': {}}
+ )
+ results_det[asset_type][asset_subtype][aim_id]['R2Dres'].update(
+ {
+ 'R2Dres_mean_RecoveryDuration': aim[
+ 'RecoveryDuration'
+ ].mean(),
+ 'R2Dres_std_RecoveryDuration': aim[
+ 'RecoveryDuration'
+ ].std(),
+ }
+ )
+ with open(result_det_path, 'w') as f: # noqa: PTH123
ujson.dump(results_det, f)
-
- recovery_result_path = os.path.join(rec_ouput_dir, "ResilienceResult.json")
- for resource_name in resources_to_plot:
- resilience_results[resource_name].update({
- 'R2Dres_mean_Supply':resilience_results[resource_name]['Supply'].mean(axis=0).tolist(),
- 'R2Dres_std_Supply':resilience_results[resource_name]['Supply'].std(axis=0).tolist(),
- 'R2Dres_mean_Demand':resilience_results[resource_name]['Demand'].mean(axis=0).tolist(),
- 'R2Dres_std_Demand':resilience_results[resource_name]['Demand'].std(axis=0).tolist(),
- 'R2Dres_mean_Consumption':resilience_results[resource_name]['Consumption'].mean(axis=0).tolist(),
- 'R2Dres_std_Consumption':resilience_results[resource_name]['Consumption'].std(axis=0).tolist()
- })
- resilience_results[resource_name].pop("Supply")
- resilience_results[resource_name].pop("Demand")
- resilience_results[resource_name].pop("Consumption")
-
-
- with open(recovery_result_path, 'w') as f:
+ recovery_result_path = os.path.join(rec_ouput_dir, 'ResilienceResult.json') # noqa: PTH118
+ for resource_name in resources_to_plot:
+ resilience_results[resource_name].update(
+ {
+ 'R2Dres_mean_Supply': resilience_results[resource_name]['Supply']
+ .mean(axis=0)
+ .tolist(),
+ 'R2Dres_std_Supply': resilience_results[resource_name]['Supply']
+ .std(axis=0)
+ .tolist(),
+ 'R2Dres_mean_Demand': resilience_results[resource_name]['Demand']
+ .mean(axis=0)
+ .tolist(),
+ 'R2Dres_std_Demand': resilience_results[resource_name]['Demand']
+ .std(axis=0)
+ .tolist(),
+ 'R2Dres_mean_Consumption': resilience_results[resource_name][
+ 'Consumption'
+ ]
+ .mean(axis=0)
+ .tolist(),
+ 'R2Dres_std_Consumption': resilience_results[resource_name][
+ 'Consumption'
+ ]
+ .std(axis=0)
+ .tolist(),
+ }
+ )
+ resilience_results[resource_name].pop('Supply')
+ resilience_results[resource_name].pop('Demand')
+ resilience_results[resource_name].pop('Consumption')
+
+ with open(recovery_result_path, 'w') as f: # noqa: PTH123
ujson.dump(resilience_results, f)
    # The code below is for development use only
- from pyrecodes import GeoVisualizer as gvis
+ from pyrecodes import GeoVisualizer as gvis # noqa: N813
+
geo_visualizer = gvis.R2D_GeoVisualizer(system.components)
geo_visualizer.plot_component_localities()
from pyrecodes import Plotter
+
plotter_object = Plotter.Plotter()
x_axis_label = 'Time step [day]'
- resources_to_plot = ['Shelter', 'FunctionalHousing', 'ElectricPower', 'PotableWater']
+ resources_to_plot = [
+ 'Shelter',
+ 'FunctionalHousing',
+ 'ElectricPower',
+ 'PotableWater',
+ ]
resource_units = ['[beds/day]', '[beds/day]', '[MWh/day]', '[RC/day]']
# define which resilience calculator to use to plot the supply/demand/consumption of the resources
# they are ordered as in the system configuration file
resilience_calculator_id = 0
- for i, resource_name in enumerate(resources_to_plot):
+ for i, resource_name in enumerate(resources_to_plot):
y_axis_label = f'{resource_name} {resource_units[i]} | {system.resilience_calculators[resilience_calculator_id].scope}'
- axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label)
- time_range = system.time_step+1
- time_steps_before_event = 10 #
- plotter_object.plot_single_resource(list(range(-time_steps_before_event, time_range)),
- resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range],
- resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range],
- resilience_results[resource_name]['R2Dres_mean_Consumption'][:time_range],
- axis_object, warmup=time_steps_before_event)
- print()
-def create_system_configuration(rec_config):
+ axis_object = plotter_object.setup_lor_plot_fig(x_axis_label, y_axis_label)
+ time_range = system.time_step + 1
+ time_steps_before_event = 10
+ plotter_object.plot_single_resource(
+ list(range(-time_steps_before_event, time_range)),
+ resilience_results[resource_name]['R2Dres_mean_Supply'][:time_range],
+ resilience_results[resource_name]['R2Dres_mean_Demand'][:time_range],
+ resilience_results[resource_name]['R2Dres_mean_Consumption'][
+ :time_range
+ ],
+ axis_object,
+ warmup=time_steps_before_event,
+ )
+ print() # noqa: T201
+
+
+def create_system_configuration(rec_config): # noqa: D103
content_config = rec_config.pop('Content')
system_configuration = rec_config.copy()
if content_config['Creator'] == 'FromJsonFile':
- with open(content_config['FilePath'], 'r') as f:
+ with open(content_config['FilePath'], 'r') as f: # noqa: PTH123, UP015
content = json.load(f)
- system_configuration.update({"Content":content})
+ system_configuration.update({'Content': content})
elif content_config['Creator'] == 'LocalityGeoJSON':
        # TODO: decide how users can input RecoveryResourceSupplier and Resources
pass
-
return system_configuration
-def select_realizations_to_run(damage_input, inputRWHALE):
- rlzs_num = min([item['ApplicationData']['Realizations'] \
- for _, item in inputRWHALE['Applications']['DL'].items()])
+def select_realizations_to_run(damage_input, inputRWHALE): # noqa: N803, D103
+ rlzs_num = min(
+ [
+ item['ApplicationData']['Realizations']
+ for _, item in inputRWHALE['Applications']['DL'].items()
+ ]
+ )
rlzs_available = np.array(range(rlzs_num))
if damage_input['Type'] == 'R2DDamageRealization':
rlz_filter = damage_input['Parameters']['Filter']
rlzs_requested = []
for rlzs in rlz_filter.split(','):
- if "-" in rlzs:
- rlzs_low, rlzs_high = rlzs.split("-")
- rlzs_requested += list(range(int(rlzs_low), int(rlzs_high)+1))
+ if '-' in rlzs:
+ rlzs_low, rlzs_high = rlzs.split('-')
+ rlzs_requested += list(range(int(rlzs_low), int(rlzs_high) + 1))
else:
rlzs_requested.append(int(rlzs))
rlzs_requested = np.array(rlzs_requested)
- rlzs_in_available = np.in1d(rlzs_requested, rlzs_available)
+ rlzs_in_available = np.in1d(rlzs_requested, rlzs_available) # noqa: NPY201
if rlzs_in_available.sum() != 0:
- rlzs_to_run = rlzs_requested[
- np.where(rlzs_in_available)[0]]
+ rlzs_to_run = rlzs_requested[np.where(rlzs_in_available)[0]]
else:
rlzs_to_run = []
if damage_input['Type'] == 'R2DDamageSample':
@@ -316,50 +442,64 @@ def select_realizations_to_run(damage_input, inputRWHALE):
seed = damage_input['Parameters']['SampleSize']
if sample_size < rlzs_num:
np.random.seed(seed)
- rlzs_to_run = np.sort(np.random.choice(rlzs_available, sample_size,\
- replace = False)).tolist()
+ rlzs_to_run = np.sort(
+ np.random.choice(rlzs_available, sample_size, replace=False)
+ ).tolist()
else:
rlzs_to_run = np.sort(rlzs_available).tolist()
return rlzs_to_run
-if __name__ == '__main__':
- #Defining the command line arguments
+if __name__ == '__main__':
+ # Defining the command line arguments
- workflowArgParser = argparse.ArgumentParser(
- "Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.",
- allow_abbrev=False)
+ workflowArgParser = argparse.ArgumentParser( # noqa: N816
+ 'Run Pyrecodes from the NHERI SimCenter rWHALE workflow for a set of assets.',
+ allow_abbrev=False,
+ )
- workflowArgParser.add_argument("-c", "--configJsonPath",
- help="Configuration file for running perycode")
- workflowArgParser.add_argument("-i", "--inputRWHALEPath",
- help="Configuration file specifying the rwhale applications and data "
- "used")
- workflowArgParser.add_argument("-p", "--parallelType",
+ workflowArgParser.add_argument(
+        '-c', '--configJsonPath', help='Configuration file for running pyrecodes'
+ )
+ workflowArgParser.add_argument(
+ '-i',
+ '--inputRWHALEPath',
+ help='Configuration file specifying the rwhale applications and data '
+ 'used',
+ )
+ workflowArgParser.add_argument(
+ '-p',
+ '--parallelType',
default='seqRUN',
- help="How parallel runs: options seqRUN, parSETUP, parRUN")
- workflowArgParser.add_argument("-m", "--mpiexec",
+        help='How to run in parallel: options are seqRUN, parSETUP, parRUN',
+ )
+ workflowArgParser.add_argument(
+ '-m',
+ '--mpiexec',
default='mpiexec',
- help="How mpi runs, e.g. ibrun, mpirun, mpiexec")
- workflowArgParser.add_argument("-n", "--numP",
+        help='Which MPI launcher to use, e.g. ibrun, mpirun, mpiexec',
+ )
+ workflowArgParser.add_argument(
+ '-n',
+ '--numP',
default='8',
- help="If parallel, how many jobs to start with mpiexec option")
+        help='If running in parallel, how many processes to start with the mpiexec option',
+ )
- #Parsing the command line arguments
- wfArgs = workflowArgParser.parse_args()
+ # Parsing the command line arguments
+ wfArgs = workflowArgParser.parse_args() # noqa: N816
- #Calling the main workflow method and passing the parsed arguments
- numPROC = int(wfArgs.numP)
+ # Calling the main workflow method and passing the parsed arguments
+ numPROC = int(wfArgs.numP) # noqa: N816
- with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f:
+ with open(Path(wfArgs.configJsonPath).resolve(), 'r') as f: # noqa: PTH123, UP015
rec_config = json.load(f)
- with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f:
- inputRWHALE = json.load(f)
-
- run_pyrecodes(rec_config=rec_config,\
- inputRWHALE=inputRWHALE,
- parallelType = wfArgs.parallelType,
- mpiExec = wfArgs.mpiexec,
- numPROC = numPROC)
-
-
\ No newline at end of file
+ with open(Path(wfArgs.inputRWHALEPath).resolve(), 'r') as f: # noqa: PTH123, UP015
+ inputRWHALE = json.load(f) # noqa: N816
+ run_pyrecodes(
+ rec_config=rec_config,
+ inputRWHALE=inputRWHALE,
+ parallelType=wfArgs.parallelType,
+ mpiExec=wfArgs.mpiexec,
+ numPROC=numPROC,
+ )
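select_realizations_to_run accepts a comma-separated filter whose entries are either single indices or dash-separated ranges with an inclusive upper bound. A self-contained sketch of that parsing rule (the filter string is hypothetical):

def parse_realization_filter(rlz_filter: str) -> list[int]:
    """Expand a filter such as '0,2,5-7' into [0, 2, 5, 6, 7]."""
    requested = []
    for token in rlz_filter.split(','):
        if '-' in token:
            low, high = token.split('-')
            # dash ranges include the upper bound
            requested += list(range(int(low), int(high) + 1))
        else:
            requested.append(int(token))
    return requested

print(parse_realization_filter('0,2,5-7'))  # [0, 2, 5, 6, 7]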
diff --git a/modules/performRegionalEventSimulation/DummyEventApp/DEA.py b/modules/performRegionalEventSimulation/DummyEventApp/DEA.py
index 55e68fe54..11cc8570d 100644
--- a/modules/performRegionalEventSimulation/DummyEventApp/DEA.py
+++ b/modules/performRegionalEventSimulation/DummyEventApp/DEA.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -40,8 +40,8 @@
import argparse
-def simulate_event(in1, in2):
- print(f'SIMULATING EVENT: {in1}, {in2}')
+def simulate_event(in1, in2): # noqa: D103
+ print(f'SIMULATING EVENT: {in1}, {in2}') # noqa: T201
if __name__ == '__main__':
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
index b08191b2b..183b4d7ce 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ComputeIntensityMeasure.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -94,27 +94,27 @@
IM_GMPE = {'LOCAL': LOCAL_IM_GMPE, 'OPENSHA': OPENSHA_IM_GMPE}
-import collections
-import json
-import os
-import socket
-import sys
-import time
-from pathlib import Path
+import collections # noqa: E402
+import json # noqa: E402
+import os # noqa: E402
+import socket # noqa: E402
+import sys # noqa: E402
+import time # noqa: E402
+from pathlib import Path # noqa: E402
-import pandas as pd
-from gmpe import SignificantDurationModel, openSHAGMPE
-from tqdm import tqdm
+import pandas as pd # noqa: E402
+from gmpe import SignificantDurationModel, openSHAGMPE # noqa: E402
+from tqdm import tqdm # noqa: E402
if 'stampede2' not in socket.gethostname():
from FetchOpenQuake import get_site_rup_info_oq
- from FetchOpenSHA import *
-import threading
+ from FetchOpenSHA import * # noqa: F403
+import threading # noqa: E402
-import ujson
+import ujson # noqa: E402
-class IM_Calculator:
+class IM_Calculator: # noqa: D101
# Chiou & Youngs (2014) GMPE class
CY = None
# Abrahamson, Silvar, & Kamai (2014)
@@ -125,17 +125,17 @@ class IM_Calculator:
CB = None
# profile
- timeGetRuptureInfo = 0
- timeGetIM = 0
+ timeGetRuptureInfo = 0 # noqa: N815
+ timeGetIM = 0 # noqa: N815
def __init__(
self,
- source_info=dict(),
- im_dict=dict(),
- gmpe_dict=dict(),
- gmpe_weights_dict=dict(),
+ source_info=dict(), # noqa: B006, C408, ARG002
+ im_dict=dict(), # noqa: B006, C408
+ gmpe_dict=dict(), # noqa: B006, C408
+ gmpe_weights_dict=dict(), # noqa: B006, C408
im_type=None,
- site_info=dict(),
+ site_info=dict(), # noqa: B006, C408
):
# basic set-ups
self.set_im_gmpe(im_dict, gmpe_dict, gmpe_weights_dict)
@@ -143,11 +143,11 @@ def __init__(
self.set_sites(site_info)
# self.set_source(source_info)
- def set_source(self, source_info):
+ def set_source(self, source_info): # noqa: D102
# set seismic source
self.source_info = source_info.copy()
gmpe_list = set()
- for _, item in self.gmpe_dict.items():
+ for _, item in self.gmpe_dict.items(): # noqa: PERF102
gmpe_list = gmpe_list.union(set(item))
if source_info['Type'] == 'ERF':
if (
@@ -159,7 +159,7 @@ def set_source(self, source_info):
source_index = source_info.get('SourceIndex', None)
rupture_index = source_info.get('RuptureIndex', None)
# start = time.process_time_ns()
- site_rup_dict, station_info = get_rupture_info_CY2014(
+ site_rup_dict, station_info = get_rupture_info_CY2014( # noqa: F405
self.erf, source_index, rupture_index, self.site_info
)
# self.timeGetRuptureInfo += time.process_time_ns() - start
@@ -171,7 +171,7 @@ def set_source(self, source_info):
or 'Campbell & Bozorgnia (2014)' in gmpe_list
):
# start = time.process_time_ns()
- site_rup_dict, station_info = get_PointSource_info_CY2014(
+ site_rup_dict, station_info = get_PointSource_info_CY2014( # noqa: F405
source_info, self.site_info
)
# self.timeGetRuptureInfo += time.process_time_ns() - start
@@ -190,43 +190,43 @@ def set_source(self, source_info):
self.site_rup_dict = site_rup_dict
self.site_info = station_info
- def set_im_gmpe(self, im_dict, gmpe_dict, gmpe_weights_dict):
+ def set_im_gmpe(self, im_dict, gmpe_dict, gmpe_weights_dict): # noqa: D102
# set im and gmpe information
self.im_dict = im_dict.copy()
self.gmpe_dict = gmpe_dict.copy()
self.gmpe_weights_dict = gmpe_weights_dict.copy()
- def set_im_type(self, im_type):
+ def set_im_type(self, im_type): # noqa: D102
# set im type
if im_type is None:
self.im_type = None
elif list(self.im_dict.keys()) and (
im_type not in list(self.im_dict.keys())
):
- print(
+ print( # noqa: T201
f'IM_Calculator.set_im_type: warning - {im_type} is not in the defined IM lists.'
)
self.im_type = None
else:
self.im_type = im_type
- def set_sites(self, site_info):
+ def set_sites(self, site_info): # noqa: D102
# set sites
self.site_info = site_info
- def calculate_im(self):
+ def calculate_im(self): # noqa: C901, D102
# set up intensity measure calculations
# current im type
im_type = self.im_type
if im_type is None:
- print('IM_Calculator.calculate_im: error - no IM type found.')
+ print('IM_Calculator.calculate_im: error - no IM type found.') # noqa: T201
return None
# get current im dict
cur_im_dict = self.im_dict.get(im_type)
# get gmpe list
gmpe_list = self.gmpe_dict.get(im_type, None)
if gmpe_list is None:
- print(
+ print( # noqa: T201
f'IM_Calculator.calculate_im: error - no GMPE list found for {im_type}.'
)
return None
@@ -251,7 +251,7 @@ def calculate_im(self):
else:
gmpe_weights_list_opensha = None
else:
- print(
+ print( # noqa: T201
f'IM_Calculator.calculate_im: error - {cur_gmpe} is not supported.'
)
return None
@@ -265,7 +265,7 @@ def calculate_im(self):
gmpe_weights=gmpe_weights_list_local,
)
else:
- res_local = dict()
+ res_local = dict() # noqa: C408
if len(gmpe_list_opensha) > 0:
res_opensha = self.get_im_from_opensha(
self.source_info,
@@ -278,14 +278,14 @@ def calculate_im(self):
gmpe_weights=gmpe_weights_list_opensha,
)
else:
- res_opensha = dict()
+ res_opensha = dict() # noqa: C408
# collect/combine im results
if len(res_local) + len(res_opensha) == 0:
- print(
+ print( # noqa: T201
'IM_Calculator.calculate_im: error - no results available... please check GMPE availability'
)
- return dict()
+ return dict() # noqa: C408
if len(res_local) == 0:
res = res_opensha
elif len(res_opensha) == 0:
@@ -299,7 +299,7 @@ def calculate_im(self):
# return
return res
- def get_im_from_opensha(
+ def get_im_from_opensha( # noqa: D102
self,
source_info,
gmpe_list,
@@ -312,16 +312,16 @@ def get_im_from_opensha(
):
# Computing IM
res_list = []
- res = dict()
+ res = dict() # noqa: C408
curgmpe_info = {}
station_list = station_info.get('SiteList')
im_info.update({'Type': im_type})
for cur_gmpe in gmpe_list:
# set up site properties
- siteSpec, sites, site_prop = get_site_prop(cur_gmpe, station_list)
+ siteSpec, sites, site_prop = get_site_prop(cur_gmpe, station_list) # noqa: N806, F405
curgmpe_info['Type'] = cur_gmpe
curgmpe_info['Parameters'] = gmpe_para
- cur_res, station_info = get_IM(
+ cur_res, station_info = get_IM( # noqa: F405
curgmpe_info,
erf,
sites,
@@ -341,7 +341,7 @@ def get_im_from_opensha(
# return
return res
- def get_im_from_local(
+ def get_im_from_local( # noqa: C901, D102
self,
source_info,
gmpe_list,
@@ -351,33 +351,33 @@ def get_im_from_local(
):
# initiate
res_list = []
- res = dict()
+ res = dict() # noqa: C408
# check IM type
if im_type not in list(LOCAL_IM_GMPE.keys()):
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure.get_im_from_local: error - IM type {im_type} not supported'
)
return res
# get available gmpe list
avail_gmpe = LOCAL_IM_GMPE.get(im_type)
        # back compatibility for now (useful if other local GMPEs for SA are included)
- cur_T = im_info.get('Periods', None)
+ cur_T = im_info.get('Periods', None) # noqa: N806
# source and rupture
if source_info['Type'] == 'PointSource':
# magnitude
eq_magnitude = source_info['Magnitude']
- eq_loc = [
+ eq_loc = [ # noqa: F841
source_info['Location']['Latitude'],
source_info['Location']['Longitude'],
source_info['Location']['Depth'],
]
# maf
- meanAnnualRate = None
+ meanAnnualRate = None # noqa: N806
elif source_info['Type'] == 'ERF':
source_index = source_info.get('SourceIndex', None)
rupture_index = source_info.get('RuptureIndex', None)
if None in [source_index, rupture_index]:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure.get_im_from_local: error - source/rupture index not given.'
)
return res
@@ -387,21 +387,21 @@ def get_im_from_local(
# maf
# timeSpan = erf.getTimeSpan()
# meanAnnualRate = erf.getSource(source_index).getRupture(rupture_index).getMeanAnnualRate(timeSpan.getDuration())
- meanAnnualRate = source_info['MeanAnnualRate']
+ meanAnnualRate = source_info['MeanAnnualRate'] # noqa: N806
elif source_info['Type'] == 'oqSourceXML':
source_index = source_info.get('SourceIndex', None)
rupture_index = source_info.get('RuptureIndex', None)
if None in [source_index, rupture_index]:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure.get_im_from_local: error - source/rupture index not given.'
)
return res
# magnitude
eq_magnitude = source_info['Magnitude']
# maf
- meanAnnualRate = source_info['MeanAnnualRate']
+ meanAnnualRate = source_info['MeanAnnualRate'] # noqa: N806
else:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure.get_im_from_local: error - source type {} not supported'.format(
source_info['Type']
)
@@ -410,7 +410,7 @@ def get_im_from_local(
for cur_gmpe in gmpe_list:
gm_collector = []
if cur_gmpe not in avail_gmpe:
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure.get_im_from_local: warning - {cur_gmpe} is not available.'
)
continue
@@ -418,14 +418,14 @@ def get_im_from_local(
# current site-rupture distance
cur_dist = cur_site['rRup']
cur_vs30 = cur_site['vs30']
- tmpResult = {
+ tmpResult = { # noqa: N806
'Mean': [],
'TotalStdDev': [],
'InterEvStdDev': [],
'IntraEvStdDev': [],
}
if cur_gmpe == 'Bommer, Stafford & Alarcon (2009)':
- mean, stdDev, interEvStdDev, intraEvStdDev = (
+ mean, stdDev, interEvStdDev, intraEvStdDev = ( # noqa: N806
SignificantDurationModel.bommer_stafford_alarcon_ds_2009(
magnitude=eq_magnitude,
distance=cur_dist,
@@ -438,7 +438,7 @@ def get_im_from_local(
tmpResult['InterEvStdDev'].append(float(interEvStdDev))
tmpResult['IntraEvStdDev'].append(float(intraEvStdDev))
elif cur_gmpe == 'Afshari & Stewart (2016)':
- mean, stdDev, interEvStdDev, intraEvStdDev = (
+ mean, stdDev, interEvStdDev, intraEvStdDev = ( # noqa: N806
SignificantDurationModel.afshari_stewart_ds_2016(
magnitude=eq_magnitude,
distance=cur_dist,
@@ -452,30 +452,30 @@ def get_im_from_local(
tmpResult['IntraEvStdDev'].append(float(intraEvStdDev))
elif cur_gmpe == 'Chiou & Youngs (2014)':
# start = time.process_time_ns()
- tmpResult = self.CY.get_IM(
+ tmpResult = self.CY.get_IM( # noqa: N806
eq_magnitude, self.site_rup_dict, cur_site, im_info
)
# self.timeGetIM += time.process_time_ns() - start
elif cur_gmpe == 'Abrahamson, Silva & Kamai (2014)':
# start = time.process_time_ns()
- tmpResult = self.ASK.get_IM(
+ tmpResult = self.ASK.get_IM( # noqa: N806
eq_magnitude, self.site_rup_dict, cur_site, im_info
)
# self.timeGetIM += time.process_time_ns() - start
elif cur_gmpe == 'Boore, Stewart, Seyhan & Atkinson (2014)':
# start = time.process_time_ns()
- tmpResult = self.BSSA.get_IM(
+ tmpResult = self.BSSA.get_IM( # noqa: N806
eq_magnitude, self.site_rup_dict, cur_site, im_info
)
# self.timeGetIM += time.process_time_ns() - start
elif cur_gmpe == 'Campbell & Bozorgnia (2014)':
# start = time.process_time_ns()
- tmpResult = self.CB.get_IM(
+ tmpResult = self.CB.get_IM( # noqa: N806
eq_magnitude, self.site_rup_dict, cur_site, im_info
)
# self.timeGetIM += time.process_time_ns() - start
else:
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure.get_im_from_local: gmpe_name {cur_gmpe} is not supported.'
)
# collect sites
@@ -508,18 +508,18 @@ def get_im_from_local(
return res
-def collect_multi_im_res(res_dict):
+def collect_multi_im_res(res_dict): # noqa: C901, D103
res_list = []
- if 'PGA' in res_dict.keys():
+ if 'PGA' in res_dict.keys(): # noqa: SIM118
res_list.append(res_dict['PGA'])
- if 'SA' in res_dict.keys():
+ if 'SA' in res_dict.keys(): # noqa: SIM118
res_list.append(res_dict['SA'])
- if 'PGV' in res_dict.keys():
+ if 'PGV' in res_dict.keys(): # noqa: SIM118
res_list.append(res_dict['PGV'])
- res = dict()
+ res = dict() # noqa: C408
num_res = len(res_list)
if num_res == 0:
- print('IM_Calculator._collect_res: error - the res_list is empty')
+ print('IM_Calculator._collect_res: error - the res_list is empty') # noqa: T201
return res
for i, cur_res in enumerate(res_list):
if i == 0:
@@ -552,16 +552,16 @@ def collect_multi_im_res(res_dict):
return res
-def collect_multi_im_res_hdf5(res_list, im_list):
- res = dict()
+def collect_multi_im_res_hdf5(res_list, im_list): # noqa: D103
+ res = dict() # noqa: C408
num_res = len(res_list)
if num_res == 0:
- print('IM_Calculator._collect_res: error - the res_list is empty')
+ print('IM_Calculator._collect_res: error - the res_list is empty') # noqa: T201
return res
- num_sites = len(res_list[list(res_list.keys())[0]]['GroundMotions'])
+ num_sites = len(res_list[list(res_list.keys())[0]]['GroundMotions']) # noqa: RUF015
collected_mean = np.zeros([num_sites, len(im_list)])
- collected_intraStd = np.zeros([num_sites, len(im_list)])
- collected_interStd = np.zeros([num_sites, len(im_list)])
+ collected_intraStd = np.zeros([num_sites, len(im_list)]) # noqa: N806
+ collected_interStd = np.zeros([num_sites, len(im_list)]) # noqa: N806
for i, im in enumerate(im_list):
if im.startswith('PGA'):
collected_mean[:, i] = np.array(
@@ -623,12 +623,12 @@ def collect_multi_im_res_hdf5(res_list, im_list):
return res
-def get_im_dict(im_info):
+def get_im_dict(im_info): # noqa: D103
if im_info.get('Type', None) == 'Vector':
im_dict = im_info.copy()
im_dict.pop('Type')
- if 'PGV' in im_dict.keys():
- PGV_dict = im_dict.pop('PGV')
+ if 'PGV' in im_dict.keys(): # noqa: SIM118
+ PGV_dict = im_dict.pop('PGV') # noqa: N806
im_dict.update({'PGV': PGV_dict})
else:
# back compatibility
@@ -638,26 +638,26 @@ def get_im_dict(im_info):
return im_dict
-def get_gmpe_from_im_vector(im_info, gmpe_info):
- gmpe_dict = dict()
- gmpe_weights_dict = dict()
+def get_gmpe_from_im_vector(im_info, gmpe_info): # noqa: D103
+ gmpe_dict = dict() # noqa: C408
+ gmpe_weights_dict = dict() # noqa: C408
# check IM info type
if im_info.get('Type', None) != 'Vector':
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure.get_gmpe_from_im_vector: error: IntensityMeasure Type should be Vector.'
)
return gmpe_dict, gmpe_weights_dict
- else:
+ else: # noqa: RET505
im_keys = list(im_info.keys())
im_keys.remove('Type')
for cur_im in im_keys:
cur_gmpe = im_info[cur_im].get('GMPE', None)
cur_weights = im_info[cur_im].get('GMPEWeights', None)
if cur_gmpe is None:
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure.get_gmpe_from_im_vector: warning: GMPE not found for {cur_im}'
)
- elif type(cur_gmpe) == str:
+ elif type(cur_gmpe) == str: # noqa: E721
if cur_gmpe == 'NGAWest2 2014 Averaged':
cur_gmpe = [
'Abrahamson, Silva & Kamai (2014)',
@@ -672,15 +672,15 @@ def get_gmpe_from_im_vector(im_info, gmpe_info):
gmpe_dict.update({cur_im: cur_gmpe})
gmpe_weights_dict.update({cur_im: cur_weights})
# global parameters if any
- gmpe_dict.update({'Parameters': gmpe_info.get('Parameters', dict())})
+ gmpe_dict.update({'Parameters': gmpe_info.get('Parameters', dict())}) # noqa: C408
# return
return gmpe_dict, gmpe_weights_dict
-def get_gmpe_from_im_legency(im_info, gmpe_info, gmpe_weights=None):
+def get_gmpe_from_im_legency(im_info, gmpe_info, gmpe_weights=None): # noqa: D103
# back compatibility for getting ims and gmpes
- gmpe_dict = dict()
- gmpe_weights_dict = dict()
+ gmpe_dict = dict() # noqa: C408
+ gmpe_weights_dict = dict() # noqa: C408
if gmpe_info['Type'] == 'NGAWest2 2014 Averaged':
gmpe_list = [
'Abrahamson, Silva & Kamai (2014)',
@@ -700,37 +700,37 @@ def get_gmpe_from_im_legency(im_info, gmpe_info, gmpe_weights=None):
gmpe_dict.update({im_type: gmpe_list})
gmpe_weights_dict = {im_type: gmpe_weights}
# global parameters if any
- gmpe_dict.update({'Parameters': gmpe_info.get('Parameters', dict())})
+ gmpe_dict.update({'Parameters': gmpe_info.get('Parameters', dict())}) # noqa: C408
# return
return gmpe_dict, gmpe_weights_dict
-def compute_im(
+def compute_im( # noqa: C901, D103
scenarios,
stations,
- EqRupture_info,
+ EqRupture_info, # noqa: N803
gmpe_info,
im_info,
generator_info,
output_dir,
filename='IntensityMeasureMeanStd.hdf5',
- mth_flag=True,
+ mth_flag=True, # noqa: FBT002
):
# Calling OpenSHA to compute median PSA
- if len(scenarios) < 10:
+ if len(scenarios) < 10: # noqa: PLR2004
filename = 'IntensityMeasureMeanStd.json'
- saveInJson = True
+ saveInJson = True # noqa: N806
im_raw = {}
else:
- saveInJson = False
- filename = os.path.join(output_dir, filename)
+ saveInJson = False # noqa: N806
+ filename = os.path.join(output_dir, filename) # noqa: PTH118
im_list = []
- if 'PGA' in im_info.keys():
+ if 'PGA' in im_info.keys(): # noqa: SIM118
im_list.append('PGA')
- if 'SA' in im_info.keys():
+ if 'SA' in im_info.keys(): # noqa: SIM118
for cur_period in im_info['SA']['Periods']:
- im_list.append(f'SA({cur_period!s})')
- if 'PGV' in im_info.keys():
+ im_list.append(f'SA({cur_period!s})') # noqa: PERF401
+ if 'PGV' in im_info.keys(): # noqa: SIM118
im_list.append('PGV')
# Stations
station_list = [
@@ -787,11 +787,11 @@ def compute_im(
site_info=stations,
)
if EqRupture_info['EqRupture']['Type'] == 'ERF':
- im_calculator.erf = getERF(EqRupture_info)
+ im_calculator.erf = getERF(EqRupture_info) # noqa: F405
else:
im_calculator.erf = None
gmpe_set = set()
- for _, item in gmpe_dict.items():
+ for _, item in gmpe_dict.items(): # noqa: PERF102
gmpe_set = gmpe_set.union(set(item))
for gmpe in gmpe_set:
if gmpe == 'Chiou & Youngs (2014)':
@@ -804,8 +804,8 @@ def compute_im(
im_calculator.CB = openSHAGMPE.campbell_bozorgnia_2014()
# for i in tqdm(range(len(scenarios.keys())), desc=f"Evaluate GMPEs for {len(scenarios.keys())} scenarios"):
# Initialize an hdf5 file for IMmeanStd
- if os.path.exists(filename):
- os.remove(filename)
+ if os.path.exists(filename): # noqa: PTH110
+ os.remove(filename) # noqa: PTH107
for i in tqdm(
range(len(scenarios.keys())),
desc=f'Evaluate GMPEs for {len(scenarios.keys())} scenarios',
@@ -817,17 +817,17 @@ def compute_im(
source_info = scenarios[key]
im_calculator.set_source(source_info)
# Computing IM
- res_list = dict()
+ res_list = dict() # noqa: C408
for cur_im_type in list(im_dict.keys()):
im_calculator.set_im_type(cur_im_type)
res_list.update({cur_im_type: im_calculator.calculate_im()})
# Collecting outputs
# collectedResult.update({'SourceIndex':source_info['SourceIndex'], 'RuptureIndex':source_info['RuptureIndex']})
if saveInJson:
- collectedResult = collect_multi_im_res(res_list)
+ collectedResult = collect_multi_im_res(res_list) # noqa: N806
im_raw.update({key: collectedResult})
else:
- collectedResult = collect_multi_im_res_hdf5(res_list, im_list)
+ collectedResult = collect_multi_im_res_hdf5(res_list, im_list) # noqa: N806
with h5py.File(filename, 'a') as f:
# Add a group named by the scenario index and has four dataset
                    # mean, totalStd, interStd, intraStd
@@ -879,21 +879,21 @@ def compute_im(
# order the res_dict by id
res_ordered = collections.OrderedDict(sorted(res_dict.items()))
- for i, cur_res in res_ordered.items():
+ for i, cur_res in res_ordered.items(): # noqa: B007
im_raw.append(cur_res)
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure: mean and standard deviation of intensity measures {time.time() - t_start} sec'
)
if saveInJson:
- with open(filename, 'w') as f:
+ with open(filename, 'w') as f: # noqa: PTH123
ujson.dump(im_raw, f, indent=1)
# return
return filename, im_list
-def compute_im_para(
+def compute_im_para( # noqa: D103
ids,
scenario_infos,
im_dict,
@@ -902,8 +902,8 @@ def compute_im_para(
station_info,
res_dict,
):
- for i, id in enumerate(ids):
- print(f'ComputeIntensityMeasure: Scenario #{id + 1}.')
+ for i, id in enumerate(ids): # noqa: A001
+ print(f'ComputeIntensityMeasure: Scenario #{id + 1}.') # noqa: T201
scenario_info = scenario_infos[i]
# create a IM calculator
im_calculator = IM_Calculator(
@@ -928,7 +928,7 @@ def compute_im_para(
# return
-def export_im(
+def export_im( # noqa: C901, D103, PLR0912
stations,
im_list,
im_data,
@@ -952,7 +952,7 @@ def export_im(
num_scenarios = len(eq_data)
eq_data = np.array(eq_data)
    # Save large files to HDF5 and small files to JSON
- if num_scenarios > 100000:
+ if num_scenarios > 100000: # noqa: PLR2004
# Pandas DataFrame
h_scenarios = ['Scenario-' + str(x) for x in range(1, num_scenarios + 1)]
h_eq = [
@@ -966,10 +966,10 @@ def export_im(
]
for x in range(1, im_data[0][0, :, :].shape[1] + 1):
for y in im_list:
- h_eq.append('Record-' + str(x) + f'-{y}')
+ h_eq.append('Record-' + str(x) + f'-{y}') # noqa: PERF401
index = pd.MultiIndex.from_product([h_scenarios, h_eq])
columns = ['Site-' + str(x) for x in range(1, num_stations + 1)]
- df = pd.DataFrame(index=index, columns=columns, dtype=float)
+ df = pd.DataFrame(index=index, columns=columns, dtype=float) # noqa: PD901
# Data
for i in range(num_stations):
tmp = []
@@ -983,14 +983,14 @@ def export_im(
tmp.append(eq_data[j][3])
for x in np.ndarray.tolist(im_data[j][i, :, :].T):
for y in x:
- tmp.append(y)
+ tmp.append(y) # noqa: PERF402
df['Site-' + str(i + 1)] = tmp
# HDF output
- try:
- os.remove(os.path.join(output_dir, filename.replace('.json', '.h5')))
- except:
+ try: # noqa: SIM105
+ os.remove(os.path.join(output_dir, filename.replace('.json', '.h5'))) # noqa: PTH107, PTH118
+ except: # noqa: S110, E722
pass
- hdf = pd.HDFStore(os.path.join(output_dir, filename.replace('.json', '.h5')))
+ hdf = pd.HDFStore(os.path.join(output_dir, filename.replace('.json', '.h5'))) # noqa: PTH118
hdf.put('SiteIM', df, format='table', complib='zlib')
hdf.close()
else:
@@ -1006,7 +1006,7 @@ def export_im(
tmp.update({'IMS': im_list})
tmp_im = []
for j in range(num_scenarios):
- tmp_im.append(np.ndarray.tolist(im_data[j][i, :, :]))
+ tmp_im.append(np.ndarray.tolist(im_data[j][i, :, :])) # noqa: PERF401
if len(tmp_im) == 1:
# Simplifying the data structure if only one scenario exists
tmp_im = tmp_im[0]
@@ -1022,7 +1022,7 @@ def export_im(
ssd = cur_eq[2]
else:
ssd = 'N/A'
- if len(cur_eq) > 3 and cur_eq[3]:
+ if len(cur_eq) > 3 and cur_eq[3]: # noqa: PLR2004
srd = cur_eq[3]
else:
srd = 'N/A'
@@ -1036,7 +1036,7 @@ def export_im(
maf_out.append(tmp)
res = {'Station_lnIM': res, 'Earthquake_MAF': maf_out}
# save SiteIM.json
- with open(os.path.join(output_dir, filename), 'w') as f:
+ with open(os.path.join(output_dir, filename), 'w') as f: # noqa: PTH118, PTH123
json.dump(res, f, indent=2)
# export the event grid and station csv files
if csv_flag:
@@ -1048,7 +1048,7 @@ def export_im(
lon = [stations[j]['lon'] for j in range(len(stations))]
# vs30 = [stations[j]['vs30'] for j in range(len(stations))]
# zTR = [stations[j]['DepthToRock'] for j in range(len(stations))]
- df = pd.DataFrame(
+ df = pd.DataFrame( # noqa: PD901
{
'GP_file': station_name,
'Longitude': lon,
@@ -1059,37 +1059,37 @@ def export_im(
)
# if cur_eq[2]:
# df['SiteSourceDistance'] = cur_eq[2]
- output_dir = os.path.join(
- os.path.dirname(Path(output_dir)),
- os.path.basename(Path(output_dir)),
+ output_dir = os.path.join( # noqa: PTH118
+ os.path.dirname(Path(output_dir)), # noqa: PTH120
+ os.path.basename(Path(output_dir)), # noqa: PTH119
)
# separate directory for IM
- output_dir = os.path.join(output_dir, 'IMs')
+ output_dir = os.path.join(output_dir, 'IMs') # noqa: PTH118
try:
- os.makedirs(output_dir)
- except:
- print('HazardSimulation: output folder already exists.')
+ os.makedirs(output_dir) # noqa: PTH103
+ except: # noqa: E722
+ print('HazardSimulation: output folder already exists.') # noqa: T201
# save the csv
- df.to_csv(os.path.join(output_dir, 'EventGrid.csv'), index=False)
+ df.to_csv(os.path.join(output_dir, 'EventGrid.csv'), index=False) # noqa: PTH118
# output station#.csv
# csv header
- csvHeader = im_list
+ csvHeader = im_list # noqa: N806
for cur_scen in range(len(im_data)):
if len(im_data) > 1:
# IMPORTANT: the scenario index starts with 1 in the front end.
cur_scen_folder = 'scenario' + str(int(scenario_ids[cur_scen]) + 1)
- try:
- os.mkdir(os.path.join(output_dir, cur_scen_folder))
- except:
+ try: # noqa: SIM105
+ os.mkdir(os.path.join(output_dir, cur_scen_folder)) # noqa: PTH102, PTH118
+ except: # noqa: S110, E722
pass
# print('ComputeIntensityMeasure: scenario folder already exists.')
- cur_output_dir = os.path.join(output_dir, cur_scen_folder)
+ cur_output_dir = os.path.join(output_dir, cur_scen_folder) # noqa: PTH118
else:
cur_output_dir = output_dir
# current IM data
cur_im_data = im_data[cur_scen]
for i, site_id in enumerate(station_name):
- df = dict()
+ df = dict() # noqa: C408, PD901
# Loop over all intensity measures
for cur_im_tag in range(len(csvHeader)):
if (csvHeader[cur_im_tag].startswith('SA')) or (
@@ -1106,56 +1106,56 @@ def export_im(
df.update(
{csvHeader[cur_im_tag]: cur_im_data[i, cur_im_tag, :]}
)
- df = pd.DataFrame(df)
+ df = pd.DataFrame(df) # noqa: PD901
# Combine PGD from liquefaction, landslide and fault
if (
'liq_PGD_h' in df.columns
or 'ls_PGD_h' in df.columns
or 'fd_PGD_h' in df.columns
):
- PGD_h = np.zeros(df.shape[0])
+ PGD_h = np.zeros(df.shape[0]) # noqa: N806
if 'liq_PGD_h' in df.columns:
- PGD_h += df['liq_PGD_h'].to_numpy()
+ PGD_h += df['liq_PGD_h'].to_numpy() # noqa: N806
if 'ls_PGD_h' in df.columns:
- PGD_h += df['ls_PGD_h'].to_numpy()
+ PGD_h += df['ls_PGD_h'].to_numpy() # noqa: N806
if 'fd_PGD_h' in df.columns:
- PGD_h += df['fd_PGD_h'].to_numpy()
+ PGD_h += df['fd_PGD_h'].to_numpy() # noqa: N806
df['PGD_h'] = PGD_h
if (
'liq_PGD_v' in df.columns
or 'ls_PGD_v' in df.columns
or 'fd_PGD_v' in df.columns
):
- PGD_v = np.zeros(df.shape[0])
+ PGD_v = np.zeros(df.shape[0]) # noqa: N806
if 'liq_PGD_v' in df.columns:
- PGD_v += df['liq_PGD_v'].to_numpy()
+ PGD_v += df['liq_PGD_v'].to_numpy() # noqa: N806
if 'ls_PGD_v' in df.columns:
- PGD_v += df['ls_PGD_v'].to_numpy()
+ PGD_v += df['ls_PGD_v'].to_numpy() # noqa: N806
if 'fd_PGD_v' in df.columns:
- PGD_v += df['fd_PGD_v'].to_numpy()
+ PGD_v += df['fd_PGD_v'].to_numpy() # noqa: N806
df['PGD_v'] = PGD_v
- colToDrop = []
+ colToDrop = [] # noqa: N806
for col in df.columns:
if (
(not col.startswith('SA'))
and (col not in ['PGA', 'PGV', 'PGD_h', 'PGD_v'])
and (col not in gf_im_list)
):
- colToDrop.append(col)
- df.drop(columns=colToDrop, inplace=True)
+ colToDrop.append(col) # noqa: PERF401
+ df.drop(columns=colToDrop, inplace=True) # noqa: PD002
# if 'liq_prob' in df.columns:
# df.drop(columns=['liq_prob'], inplace=True)
# if 'liq_susc' in df.columns:
# df.drop(columns=['liq_susc'], inplace=True)
- df.fillna('NaN', inplace=True)
- df.to_csv(os.path.join(cur_output_dir, site_id), index=False)
+ df.fillna('NaN', inplace=True) # noqa: PD002
+ df.to_csv(os.path.join(cur_output_dir, site_id), index=False) # noqa: PTH118
# output the site#.csv file including all scenarios
if len(im_data) > 1:
- print('ComputeIntensityMeasure: saving all selected scenarios.')
+ print('ComputeIntensityMeasure: saving all selected scenarios.') # noqa: T201
         # loop over sites
for i, site_id in enumerate(station_name):
- df = dict()
+ df = dict() # noqa: C408, PD901
for cur_im_tag in range(len(csvHeader)):
tmp_list = []
# loop over all scenarios
@@ -1169,35 +1169,35 @@ def export_im(
df.update({csvHeader[cur_im_tag]: np.exp(tmp_list)})
else:
df.update({csvHeader[cur_im_tag]: tmp_list})
- df = pd.DataFrame(df)
+ df = pd.DataFrame(df) # noqa: PD901
# Combine PGD from liquefaction, landslide and fault
if (
'liq_PGD_h' in df.columns
or 'ls_PGD_h' in df.columns
or 'fd_PGD_h' in df.columns
):
- PGD_h = np.zeros(df.shape[0])
+ PGD_h = np.zeros(df.shape[0]) # noqa: N806
if 'liq_PGD_h' in df.columns:
- PGD_h += df['liq_PGD_h'].to_numpy()
+ PGD_h += df['liq_PGD_h'].to_numpy() # noqa: N806
if 'ls_PGD_h' in df.columns:
- PGD_h += df['ls_PGD_h'].to_numpy()
+ PGD_h += df['ls_PGD_h'].to_numpy() # noqa: N806
if 'fd_PGD_h' in df.columns:
- PGD_h += df['fd_PGD_h'].to_numpy()
+ PGD_h += df['fd_PGD_h'].to_numpy() # noqa: N806
df['PGD_h'] = PGD_h
if (
'liq_PGD_v' in df.columns
or 'ls_PGD_v' in df.columns
or 'fd_PGD_v' in df.columns
):
- PGD_v = np.zeros(df.shape[0])
+ PGD_v = np.zeros(df.shape[0]) # noqa: N806
if 'liq_PGD_v' in df.columns:
- PGD_v += df['liq_PGD_v'].to_numpy()
+ PGD_v += df['liq_PGD_v'].to_numpy() # noqa: N806
if 'ls_PGD_v' in df.columns:
- PGD_v += df['ls_PGD_v'].to_numpy()
+ PGD_v += df['ls_PGD_v'].to_numpy() # noqa: N806
if 'fd_PGD_v' in df.columns:
- PGD_v += df['fd_PGD_v'].to_numpy()
+ PGD_v += df['fd_PGD_v'].to_numpy() # noqa: N806
df['PGD_v'] = PGD_v
- colToDrop = []
+ colToDrop = [] # noqa: N806
for col in df.columns:
if (
(not col.startswith('SA'))
@@ -1205,9 +1205,9 @@ def export_im(
and (col not in gf_im_list)
):
colToDrop.append(col)
- df.drop(columns=colToDrop, inplace=True)
- df.fillna('NaN', inplace=True)
- df.to_csv(os.path.join(output_dir, site_id), index=False)
+ df.drop(columns=colToDrop, inplace=True) # noqa: PD002
+ df.fillna('NaN', inplace=True) # noqa: PD002
+ df.to_csv(os.path.join(output_dir, site_id), index=False) # noqa: PTH118
# return
return 0
# except:
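Note: the PTH suppressions above all point at one deferred migration from os.path to pathlib (PTH118 = os.path.join, PTH123 = the open() builtin, PTH103 = os.makedirs, PTH120 = os.path.dirname, PTH119 = os.path.basename). A minimal sketch of the pathlib equivalents, for reference only; the directory and file names here are placeholders, not paths used by export_im:

from pathlib import Path

output_dir = Path('Results') / 'IMs'               # PTH118: replaces os.path.join
output_dir.mkdir(parents=True, exist_ok=True)      # PTH103: replaces os.makedirs + try/except
with (output_dir / 'SiteIM.json').open('w') as f:  # PTH123: replaces the open() builtin
    f.write('{}')
here = Path(__file__).resolve()
print(here.parent, here.name)                      # PTH120/PTH119: dirname/basename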
@@ -1215,7 +1215,7 @@ def export_im(
# return 1
-def compute_weighted_res(res_list, gmpe_weights):
+def compute_weighted_res(res_list, gmpe_weights): # noqa: C901, D103
# compute weighted average of gmpe results
# initialize the return res (these three attributes are identical in different gmpe results)
res = {
@@ -1229,7 +1229,7 @@ def compute_weighted_res(res_list, gmpe_weights):
num_gmpe = len(res_list)
# check number of weights
if num_gmpe != len(gmpe_weights):
- print(
+ print( # noqa: T201
            'ComputeIntensityMeasure: the number of GMPE weights must match the number of GMPE results.'
)
return 1
@@ -1241,7 +1241,7 @@ def compute_weighted_res(res_list, gmpe_weights):
# loop over different GMPE
tmp_res = {}
for i, cur_res in enumerate(res_list):
- cur_gmResults = cur_res['GroundMotions'][site_tag]
+ cur_gmResults = cur_res['GroundMotions'][site_tag] # noqa: N806
# get keys
im_keys = list(cur_gmResults.keys())
for cur_im in im_keys:
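Note: compute_weighted_res blends per-GMPE results with user-supplied weights after checking that the weight count matches the number of GMPE results. A minimal sketch of that weighting step in isolation; the values below are hypothetical, and the real function walks the nested 'GroundMotions' structure shown above rather than a flat list:

import numpy as np

def weighted_mean(per_gmpe_values, gmpe_weights):
    # One value per GMPE (e.g., a mean lnIM at one site); weights should sum to 1.
    values = np.asarray(per_gmpe_values, dtype=float)
    weights = np.asarray(gmpe_weights, dtype=float)
    if values.shape != weights.shape:
        raise ValueError('the number of GMPE weights must match the number of GMPE results')
    return float(values @ weights)

# Three GMPEs predicting lnPGA at one site, weighted 50/30/20:
print(weighted_mean([-1.20, -1.05, -1.32], [0.5, 0.3, 0.2]))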
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py
index 4b90eefb4..4e423b4fb 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateScenario.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -48,16 +48,16 @@
import pandas as pd
if 'stampede2' not in socket.gethostname():
- from FetchOpenSHA import *
+ from FetchOpenSHA import * # noqa: F403
-def get_rups_to_run(scenario_info, user_scenarios, num_scenarios):
+def get_rups_to_run(scenario_info, user_scenarios, num_scenarios): # noqa: C901, D103
# If there is a filter
if scenario_info['Generator'].get('method', None) == 'ScenarioSpecific':
- SourceIndex = scenario_info['Generator'].get('SourceIndex', None)
- RupIndex = scenario_info['Generator'].get('RuptureIndex', None)
+ SourceIndex = scenario_info['Generator'].get('SourceIndex', None) # noqa: N806
+ RupIndex = scenario_info['Generator'].get('RuptureIndex', None) # noqa: N806
if (SourceIndex is None) or (RupIndex is None):
- print(
+ print( # noqa: T201
                'Both SourceIndex and RuptureIndex are needed for '
'ScenarioSpecific analysis'
)
@@ -104,13 +104,13 @@ def get_rups_to_run(scenario_info, user_scenarios, num_scenarios):
return rups_to_run
-def load_earthquake_rupFile(scenario_info, rupFilePath):
+def load_earthquake_rupFile(scenario_info, rupFilePath): # noqa: N802, N803, D103
# Getting earthquake rupture forecast data
source_type = scenario_info['EqRupture']['Type']
try:
- with open(rupFilePath) as f:
+ with open(rupFilePath) as f: # noqa: PTH123
user_scenarios = json.load(f)
- except:
+ except: # noqa: E722
sys.exit(f'CreateScenario: source file {rupFilePath} not found.')
# number of features (i.e., ruptures)
num_scenarios = len(user_scenarios.get('features', []))
@@ -150,8 +150,8 @@ def load_earthquake_rupFile(scenario_info, rupFilePath):
}
)
elif source_type == 'PointSource':
- sourceID = 0
- rupID = 0
+ sourceID = 0 # noqa: N806
+ rupID = 0 # noqa: N806
for rup_tag in rups_to_run:
try:
cur_rup = user_scenarios.get('features')[rup_tag]
@@ -172,14 +172,14 @@ def load_earthquake_rupFile(scenario_info, rupFilePath):
}
}
)
- rupID = rupID + 1
- except:
- print('Please check point-source inputs.')
+ rupID = rupID + 1 # noqa: N806
+ except: # noqa: PERF203, E722
+ print('Please check point-source inputs.') # noqa: T201
# return
return scenario_data
-def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile):
+def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile): # noqa: C901, N803, D103
# Collecting all possible earthquake scenarios
lat = []
lon = []
@@ -201,18 +201,18 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
from openquake.hazardlib.geo.surface.base import BaseSurface
try:
- with open(rupFile) as f:
+ with open(rupFile) as f: # noqa: PTH123
user_scenarios = json.load(f)
- except:
+ except: # noqa: E722
sys.exit(f'CreateScenario: source file {rupFile} not found.')
# number of features (i.e., ruptures)
num_scenarios = len(user_scenarios.get('features', []))
if num_scenarios < 1:
sys.exit('CreateScenario: source file is empty.')
rups_to_run = get_rups_to_run(scenario_info, user_scenarios, num_scenarios)
- in_dir = os.path.join(work_dir, 'Input')
+ in_dir = os.path.join(work_dir, 'Input') # noqa: PTH118
oq = readinput.get_oqparam(
- dict(
+ dict( # noqa: C408
calculation_mode='classical',
inputs={'site_model': [siteFile]},
            intensity_measure_types_and_levels="{'PGA': [0.1], 'SA(0.1)': [0.1]}",  # placeholder for initializing oqparam; not used in the ERF
@@ -235,7 +235,7 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
         rupture_mesh_spacing = scenario_info['EqRupture']['rupture_mesh_spacing']
[src_nrml] = nrml.read(
- os.path.join(in_dir, scenario_info['EqRupture']['sourceFile'])
+ os.path.join(in_dir, scenario_info['EqRupture']['sourceFile']) # noqa: PTH118
)
conv = sourceconverter.SourceConverter(
scenario_info['EqRupture']['investigation_time'],
@@ -249,12 +249,12 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
sources = []
sources_dist = []
sources_id = []
- id = 0
- siteMeanCol = site.SiteCollection.from_points([mlon], [mlat])
+ id = 0 # noqa: A001
+ siteMeanCol = site.SiteCollection.from_points([mlon], [mlat]) # noqa: N806
srcfilter = SourceFilter(siteMeanCol, oq.maximum_distance)
for i in range(len(src_nrml)):
subnode = src_nrml[i]
- subSrc = src_raw[i]
+ subSrc = src_raw[i] # noqa: N806
tag = (
subnode.tag.rsplit('}')[1]
if subnode.tag.startswith('{')
@@ -263,7 +263,7 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
if tag == 'sourceGroup':
for j in range(len(subnode)):
subsubnode = subnode[j]
- subsubSrc = subSrc[j]
+ subsubSrc = subSrc[j] # noqa: N806
subtag = (
subsubnode.tag.rsplit('}')[1]
if subsubnode.tag.startswith('{')
@@ -275,22 +275,22 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
):
subsubSrc.id = id
sources_id.append(id)
- id += 1
+ id += 1 # noqa: A001
sources.append(subsubSrc)
- sourceMesh = subsubSrc.polygon.discretize(rupture_mesh_spacing)
- sourceSurface = BaseSurface(sourceMesh)
- siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat)
+ sourceMesh = subsubSrc.polygon.discretize(rupture_mesh_spacing) # noqa: N806
+ sourceSurface = BaseSurface(sourceMesh) # noqa: N806
+ siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat) # noqa: N806
sources_dist.append(sourceSurface.get_min_distance(siteMesh))
elif (
tag.endswith('Source') and srcfilter.get_close_sites(subSrc) is not None
):
subSrc.id = id
sources_id.append(id)
- id += 1
+ id += 1 # noqa: A001
sources.append(subSrc)
- sourceMesh = subSrc.polygon.discretize(rupture_mesh_spacing)
- sourceSurface = BaseSurface(sourceMesh)
- siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat)
+ sourceMesh = subSrc.polygon.discretize(rupture_mesh_spacing) # noqa: N806
+ sourceSurface = BaseSurface(sourceMesh) # noqa: N806
+ siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat) # noqa: N806
sources_dist.append(sourceSurface.get_min_distance(siteMesh))
sources_df = pd.DataFrame.from_dict(
{'source': sources, 'sourceDist': sources_dist, 'sourceID': sources_id}
@@ -298,8 +298,8 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
sources_df = sources_df.sort_values(['sourceDist'], ascending=(True))
sources_df = sources_df.set_index('sourceID')
allrups = []
- allrups_rRup = []
- allrups_srcId = []
+ allrups_rRup = [] # noqa: N806
+ allrups_srcId = [] # noqa: N806
allrups_mar = []
for src in sources_df['source']:
src_rups = list(src.iter_ruptures())
@@ -322,7 +322,7 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
maf_list_n = [-x for x in rups_df['MeanAnnualRate']]
sort_ids = np.argsort(maf_list_n)
rups_df = rups_df.iloc[sort_ids]
- rups_df.reset_index(drop=True, inplace=True)
+ rups_df.reset_index(drop=True, inplace=True) # noqa: PD002
# rups_df = rups_df = rups_df.sort_values(['MeanAnnualRate'], ascending = (False))
rups_df = rups_df.loc[rups_to_run, :]
scenario_data = {}
@@ -351,14 +351,14 @@ def load_ruptures_openquake(scenario_info, stations, work_dir, siteFile, rupFile
return scenario_data
-def load_earthquake_scenarios(scenario_info, stations, dir_info):
+def load_earthquake_scenarios(scenario_info, stations, dir_info): # noqa: D103
# Number of scenarios
- source_num = scenario_info.get('Number', 1)
+ source_num = scenario_info.get('Number', 1) # noqa: F841
# sampling method
- samp_method = scenario_info['EqRupture'].get('Sampling', 'Random')
+ samp_method = scenario_info['EqRupture'].get('Sampling', 'Random') # noqa: F841
# source model
source_model = scenario_info['EqRupture']['Model']
- eq_source = getERF(scenario_info)
+ eq_source = getERF(scenario_info) # noqa: F405
# Getting earthquake rupture forecast data
source_type = scenario_info['EqRupture']['Type']
# Collecting all sites
@@ -368,19 +368,19 @@ def load_earthquake_scenarios(scenario_info, stations, dir_info):
lat.append(s['Latitude'])
lon.append(s['Longitude'])
# load scenario file
- user_scenario_file = os.path.join(
+ user_scenario_file = os.path.join( # noqa: PTH118
dir_info.get('Input'), scenario_info.get('EqRupture').get('UserScenarioFile')
)
try:
- with open(user_scenario_file) as f:
+ with open(user_scenario_file) as f: # noqa: PTH123
user_scenarios = json.load(f)
- except:
- print(f'CreateScenario: source file {user_scenario_file} not found.')
+ except: # noqa: E722
+ print(f'CreateScenario: source file {user_scenario_file} not found.') # noqa: T201
return {}
# number of features (i.e., ruptures)
num_scenarios = len(user_scenarios.get('features', []))
if num_scenarios < 1:
- print('CreateScenario: source file is empty.')
+ print('CreateScenario: source file is empty.') # noqa: T201
return {}
# get rupture and source ids
scenario_data = {}
@@ -389,11 +389,11 @@ def load_earthquake_scenarios(scenario_info, stations, dir_info):
cur_id_source = cur_rup.get('properties').get('Source', None)
cur_id_rupture = cur_rup.get('properties').get('Rupture', None)
if cur_id_rupture is None or cur_id_source is None:
- print(
+ print( # noqa: T201
f'CreateScenario: rupture #{rup_tag} does not have valid source/rupture ID - skipped.'
)
continue
- cur_source, cur_rupture = get_source_rupture(
+ cur_source, cur_rupture = get_source_rupture( # noqa: F405
eq_source, cur_id_source, cur_id_rupture
)
scenario_data.update(
@@ -410,10 +410,10 @@ def load_earthquake_scenarios(scenario_info, stations, dir_info):
),
'SourceIndex': cur_id_source,
'RuptureIndex': cur_id_rupture,
- 'SiteSourceDistance': get_source_distance(
+ 'SiteSourceDistance': get_source_distance( # noqa: F405
eq_source, cur_id_source, lat, lon
),
- 'SiteRuptureDistance': get_rupture_distance(
+ 'SiteRuptureDistance': get_rupture_distance( # noqa: F405
eq_source, cur_id_source, cur_id_rupture, lat, lon
),
}
@@ -424,24 +424,24 @@ def load_earthquake_scenarios(scenario_info, stations, dir_info):
return scenario_data
-def create_earthquake_scenarios(
+def create_earthquake_scenarios( # noqa: C901, D103
scenario_info,
stations,
work_dir,
- openquakeSiteFile=None,
+ openquakeSiteFile=None, # noqa: N803
):
# # Number of scenarios
# source_num = scenario_info.get('Number', 1)
# if source_num == 'All':
# # Large number to consider all sources in the ERF
# source_num = 10000000
- out_dir = os.path.join(work_dir, 'Output')
+ out_dir = os.path.join(work_dir, 'Output') # noqa: PTH118
if scenario_info['Generator'] == 'Simulation':
- # TODO:
- print('Physics-based earthquake simulation is under development.')
+ # TODO: # noqa: TD002
+ print('Physics-based earthquake simulation is under development.') # noqa: T201
return 1
# Searching earthquake ruptures that fulfill the request
- elif scenario_info['Generator'] == 'Selection':
+ elif scenario_info['Generator'] == 'Selection': # noqa: RET505
# Collecting all possible earthquake scenarios
lat = []
lon = []
@@ -457,31 +457,31 @@ def create_earthquake_scenarios(
t_start = time.time()
if source_type == 'ERF':
if (
- 'SourceIndex' in scenario_info['EqRupture'].keys()
- and 'RuptureIndex' in scenario_info['EqRupture'].keys()
+ 'SourceIndex' in scenario_info['EqRupture'].keys() # noqa: SIM118
+ and 'RuptureIndex' in scenario_info['EqRupture'].keys() # noqa: SIM118
):
source_model = scenario_info['EqRupture']['Model']
- eq_source = getERF(scenario_info)
+ eq_source = getERF(scenario_info) # noqa: F405
# check source index list and rupture index list
- if type(scenario_info['EqRupture']['SourceIndex']) == int:
+ if type(scenario_info['EqRupture']['SourceIndex']) == int: # noqa: E721
source_index_list = [scenario_info['EqRupture']['SourceIndex']]
else:
source_index_list = scenario_info['EqRupture']['SourceIndex']
- if type(scenario_info['EqRupture']['RuptureIndex']) == int:
+ if type(scenario_info['EqRupture']['RuptureIndex']) == int: # noqa: E721
rup_index_list = [scenario_info['EqRupture']['RuptureIndex']]
else:
rup_index_list = scenario_info['EqRupture']['RuptureIndex']
if len(source_index_list) != len(rup_index_list):
- print(
+ print( # noqa: T201
                f'CreateScenario: the number of sources ({len(source_index_list)}) must match the number of ruptures ({len(rup_index_list)})'
)
- return dict()
+ return dict() # noqa: C408
# loop over all scenarios
- scenario_data = dict()
+ scenario_data = dict() # noqa: C408
for i in range(len(source_index_list)):
cur_source_index = source_index_list[i]
cur_rup_index = rup_index_list[i]
- distToSource = get_source_distance(
+ distToSource = get_source_distance( # noqa: N806, F405
eq_source, cur_source_index, lat, lon
)
scenario_data.update(
@@ -492,7 +492,7 @@ def create_earthquake_scenarios(
'SourceIndex': cur_source_index,
'RuptureIndex': cur_rup_index,
'SiteSourceDistance': distToSource,
- 'SiteRuptureDistance': get_rupture_distance(
+ 'SiteRuptureDistance': get_rupture_distance( # noqa: F405
eq_source,
cur_source_index,
cur_rup_index,
@@ -503,17 +503,17 @@ def create_earthquake_scenarios(
}
)
return scenario_data
- else:
+ else: # noqa: RET505
source_model = scenario_info['EqRupture']['Model']
source_name = scenario_info['EqRupture'].get('Name', None)
- min_M = scenario_info['EqRupture'].get('min_Mag', 5.0)
- max_M = scenario_info['EqRupture'].get('max_Mag', 9.0)
- max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0)
- eq_source = getERF(scenario_info)
- erf_data = export_to_json(
+ min_M = scenario_info['EqRupture'].get('min_Mag', 5.0) # noqa: N806
+ max_M = scenario_info['EqRupture'].get('max_Mag', 9.0) # noqa: N806
+ max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0) # noqa: N806
+ eq_source = getERF(scenario_info) # noqa: F405
+ erf_data = export_to_json( # noqa: F405, F841
eq_source,
ref_station,
- outfile=os.path.join(out_dir, 'RupFile.geojson'),
+ outfile=os.path.join(out_dir, 'RupFile.geojson'), # noqa: PTH118
EqName=source_name,
minMag=min_M,
maxMag=max_M,
@@ -556,10 +556,10 @@ def create_earthquake_scenarios(
# del erf_data
elif source_type == 'PointSource':
# Export to a geojson format RupFile.json
- outfile = os.path.join(out_dir, 'RupFile.geojson')
- pointSource_data = {'type': 'FeatureCollection'}
+ outfile = os.path.join(out_dir, 'RupFile.geojson') # noqa: PTH118
+ pointSource_data = {'type': 'FeatureCollection'} # noqa: N806
feature_collection = []
- newRup = {
+ newRup = { # noqa: N806
'type': 'Feature',
'properties': {
'Type': source_type,
@@ -571,7 +571,7 @@ def create_earthquake_scenarios(
'Rupture': 0,
},
}
- newRup['geometry'] = dict()
+ newRup['geometry'] = dict() # noqa: C408
newRup['geometry'].update({'type': 'Point'})
newRup['geometry'].update(
{
@@ -584,32 +584,32 @@ def create_earthquake_scenarios(
feature_collection.append(newRup)
pointSource_data.update({'features': feature_collection})
if outfile is not None:
- print(f'The collected point source ruptures are saved in {outfile}')
- with open(outfile, 'w') as f:
+ print(f'The collected point source ruptures are saved in {outfile}') # noqa: T201
+ with open(outfile, 'w') as f: # noqa: PTH123
json.dump(pointSource_data, f, indent=2)
elif source_type == 'oqSourceXML':
import FetchOpenQuake
- siteFile = os.path.join(work_dir, 'Input', openquakeSiteFile)
+ siteFile = os.path.join(work_dir, 'Input', openquakeSiteFile) # noqa: PTH118, N806
FetchOpenQuake.export_rupture_to_json(
scenario_info, mlon, mlat, siteFile, work_dir
)
- print(
+ print( # noqa: T201
            f'CreateScenario: all scenarios configured in {time.time() - t_start} sec'
)
# return
return None
-def sample_scenarios(
- rup_info=[],
+def sample_scenarios( # noqa: D103
+ rup_info=[], # noqa: B006
sample_num=1,
sample_type='Random',
source_name=None,
- min_M=0.0,
+ min_M=0.0, # noqa: N803
):
if len(rup_info) == 0:
- print(
+ print( # noqa: T201
'CreateScenario.sample_scenarios: no available scenario provided - please relax earthquake filters.'
)
return []
@@ -636,16 +636,16 @@ def sample_scenarios(
s_tag = np.random.choice(tag, sample_num, p=maf_list_n).tolist()
else:
- print('CreateScenario.sample_scenarios: please specify a sampling method.')
+ print('CreateScenario.sample_scenarios: please specify a sampling method.') # noqa: T201
s_tag = []
# return
return s_tag
-def create_wind_scenarios(scenario_info, stations, data_dir):
+def create_wind_scenarios(scenario_info, stations, data_dir): # noqa: D103
# Number of scenarios
- source_num = scenario_info.get('Number', 1)
+ source_num = scenario_info.get('Number', 1) # noqa: F841
# Directly defining earthquake ruptures
if scenario_info['Generator'] == 'Simulation':
# Collecting site locations
@@ -655,30 +655,30 @@ def create_wind_scenarios(scenario_info, stations, data_dir):
lat.append(s['Latitude'])
lon.append(s['Longitude'])
# Save Stations.csv
- df = pd.DataFrame({'lat': lat, 'lon': lon})
+ df = pd.DataFrame({'lat': lat, 'lon': lon}) # noqa: PD901
df.to_csv(data_dir + 'Stations.csv', index=False, header=False)
# Save Lat_w.csv
lat_w = np.linspace(min(lat) - 0.5, max(lat) + 0.5, 100)
- df = pd.DataFrame({'lat_w': lat_w})
+ df = pd.DataFrame({'lat_w': lat_w}) # noqa: PD901
df.to_csv(data_dir + 'Lat_w.csv', index=False, header=False)
# Parsing Terrain info
- df = pd.read_csv(
+ df = pd.read_csv( # noqa: PD901
data_dir + scenario_info['Terrain']['Longitude'],
header=None,
index_col=None,
)
df.to_csv(data_dir + 'Long_wr.csv', header=False, index=False)
- df = pd.read_csv(
+ df = pd.read_csv( # noqa: PD901
data_dir + scenario_info['Terrain']['Latitude'],
header=None,
index_col=None,
)
df.to_csv(data_dir + 'Lat_wr.csv', header=False, index=False)
- df = pd.read_csv(
+ df = pd.read_csv( # noqa: PD901
data_dir + scenario_info['Terrain']['Size'], header=None, index_col=None
)
df.to_csv(data_dir + 'wr_sizes.csv', header=False, index=False)
- df = pd.read_csv(
+ df = pd.read_csv( # noqa: PD901
data_dir + scenario_info['Terrain']['z0'], header=None, index_col=None
)
df.to_csv(data_dir + 'z0r.csv', header=False, index=False)
@@ -690,15 +690,15 @@ def create_wind_scenarios(scenario_info, stations, data_dir):
param.append(scenario_info['Storm']['Pressure'])
param.append(scenario_info['Storm']['Speed'])
param.append(scenario_info['Storm']['Radius'])
- df = pd.DataFrame({'param': param})
+ df = pd.DataFrame({'param': param}) # noqa: PD901
df.to_csv(data_dir + 'param.csv', index=False, header=False)
- df = pd.read_csv(
+ df = pd.read_csv( # noqa: PD901
data_dir + scenario_info['Storm']['Track'], header=None, index_col=None
)
df.to_csv(data_dir + 'Track.csv', header=False, index=False)
# Saving del_par.csv
del_par = [0, 0, 0] # default
- df = pd.DataFrame({'del_par': del_par})
+ df = pd.DataFrame({'del_par': del_par}) # noqa: PD901
df.to_csv(data_dir + 'del_par.csv', header=False, index=False)
# Parsing resolution data
delta_p = [1000.0, scenario_info['Resolution']['DivRad'], 1000000.0]
@@ -706,7 +706,7 @@ def create_wind_scenarios(scenario_info, stations, data_dir):
delta_p.extend(
[scenario_info['MeasureHeight'], 10, scenario_info['MeasureHeight']]
)
- df = pd.DataFrame({'delta_p': delta_p})
+ df = pd.DataFrame({'delta_p': delta_p}) # noqa: PD901
df.to_csv(data_dir + 'delta_p.csv', header=False, index=False)
else:
- print('Currently only supporting Simulation generator.')
+    print('Currently only the Simulation generator is supported.')  # noqa: T201
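Note: sample_scenarios above draws rupture tags either uniformly or with probabilities proportional to each rupture's mean annual rate (np.random.choice(tag, sample_num, p=maf_list_n)). A sketch of the rate-weighted branch with made-up rates; it uses the modern Generator API in place of the legacy np.random call:

import numpy as np

rng = np.random.default_rng(seed=0)
tags = np.arange(5)                                   # candidate rupture indices
maf = np.array([1e-3, 5e-4, 2e-4, 8e-4, 1e-4])        # hypothetical mean annual rates
probs = maf / maf.sum()                               # normalize rates into sampling weights
sampled = rng.choice(tags, size=3, p=probs).tolist()  # draw 3 scenarios, rate-weighted
print(sampled)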
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py
index d66921d12..a03932afc 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/CreateStation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -52,17 +52,17 @@
)
-def get_label(options, labels, label_name):
+def get_label(options, labels, label_name): # noqa: D103
for option in options:
if option in labels:
labels = labels[labels != option]
return option, labels
- print(f'WARNING: Could not identify the label for the {label_name}')
+ print(f'WARNING: Could not identify the label for the {label_name}') # noqa: T201, RET503
class Station:
- """A class for stations in an earthquake scenario"""
+ """A class for stations in an earthquake scenario""" # noqa: D400
def __init__(self, lon, lat, vs30=None, z2p5=None):
# Initializing the location, vs30, z2.5, Tcond and other Tags
@@ -71,28 +71,28 @@ def __init__(self, lon, lat, vs30=None, z2p5=None):
self.vs30 = vs30
self.z2p5 = z2p5
- def get_location(self):
+ def get_location(self): # noqa: D102
# Returning the geo location
return self.lon, self.lat
- def get_vs30(self):
+ def get_vs30(self): # noqa: D102
# Returning the Vs30 at the station
return self.vs30
- def get_z2p5(self):
+ def get_z2p5(self): # noqa: D102
# Returning the z2.5 of the station
return self.z2p5
-def create_stations(
+def create_stations( # noqa: C901, PLR0912, PLR0915
input_file,
output_file,
- filterIDs,
- vs30Config,
- z1Config,
- z25Config,
- zTR_tag=0,
- soil_flag=False,
+ filterIDs, # noqa: N803
+ vs30Config, # noqa: N803
+ z1Config, # noqa: N803
+ z25Config, # noqa: N803
+ zTR_tag=0, # noqa: N803
+ soil_flag=False, # noqa: FBT002
soil_model_type=None,
soil_user_fun=None,
):
@@ -107,14 +107,14 @@ def create_stations(
z2pt5_tag: z2pt5 tag: 1 - using empirical equation, 0 - leave it as null
Output:
stn_file: dictionary of station data
- """
+ """ # noqa: D205, D400, D401
# Reading csv data
run_tag = 1
try:
stn_df = pd.read_csv(input_file, header=0, index_col=0)
- except:
+ except: # noqa: E722
run_tag = 0
- return run_tag
+ return run_tag # noqa: RET504
# Max and Min IDs
if len(filterIDs) > 0:
stns_requested = []
@@ -125,7 +125,7 @@ def create_stations(
else:
stns_requested.append(int(stns))
stns_requested = np.array(stns_requested)
- stns_available = stn_df.index.values
+ stns_available = stn_df.index.values # noqa: PD011
stns_to_run = stns_requested[
np.where(np.isin(stns_requested, stns_available))[0]
]
@@ -143,21 +143,21 @@ def create_stations(
# selected_stn = copy.copy(stn_df.loc[min_id:max_id, :])
selected_stn.index = list(range(len(selected_stn.index)))
# Extracting data
- labels = selected_stn.columns.values
+ labels = selected_stn.columns.values # noqa: PD011
lon_label, labels = get_label(
['Longitude', 'longitude', 'lon', 'Lon'], labels, 'longitude'
)
lat_label, labels = get_label(
['Latitude', 'latitude', 'lat', 'Lat'], labels, 'latitude'
)
- if any([i in ['Vs30', 'vs30', 'Vs_30', 'vs_30'] for i in labels]):
+ if any([i in ['Vs30', 'vs30', 'Vs_30', 'vs_30'] for i in labels]): # noqa: C419
vs30_label, labels = get_label(
['Vs30', 'vs30', 'Vs_30', 'vs_30'], labels, 'vs30'
)
else:
vs30_label = 'Vs30'
if any(
- [
+ [ # noqa: C419
i in ['Z2p5', 'z2p5', 'Z2pt5', 'z2pt5', 'Z25', 'z25', 'Z2.5', 'z2.5']
for i in labels
]
@@ -170,7 +170,7 @@ def create_stations(
else:
z2p5_label = 'z2p5'
if any(
- [
+ [ # noqa: C419
i in ['Z1p0', 'z1p0', 'Z1pt0', 'z1pt0', 'Z1', 'z1', 'Z1.0', 'z1.0']
for i in labels
]
@@ -182,14 +182,14 @@ def create_stations(
)
else:
z1p0_label = 'z1p0'
- if any([i in ['zTR', 'ztr', 'ZTR', 'DepthToRock'] for i in labels]):
- zTR_label, labels = get_label(
+ if any([i in ['zTR', 'ztr', 'ZTR', 'DepthToRock'] for i in labels]): # noqa: C419
+ zTR_label, labels = get_label( # noqa: N806
['zTR', 'ztr', 'ZTR', 'DepthToRock'], labels, 'zTR'
)
else:
- zTR_label = 'DepthToRock'
+ zTR_label = 'DepthToRock' # noqa: N806
if soil_flag:
- if any([i in ['Model', 'model', 'SoilModel', 'soilModel'] for i in labels]):
+ if any([i in ['Model', 'model', 'SoilModel', 'soilModel'] for i in labels]): # noqa: C419
soil_model_label, labels = get_label(
['Model', 'model', 'SoilModel', 'soilModel'], labels, 'Model'
)
@@ -206,17 +206,17 @@ def create_stations(
selected_stn[soil_model_label] = [
soil_model_tag for x in range(len(selected_stn.index))
]
- STN = []
+ STN = [] # noqa: N806
stn_file = {'Stations': []}
# Get Vs30
if vs30Config['Type'] == 'User-specified':
- if vs30_label not in selected_stn.keys():
+ if vs30_label not in selected_stn.keys(): # noqa: SIM118
sys.exit(
- 'ERROR: User-specified option is selected for Vs30 model but the provided.'
+                'ERROR: User-specified option is selected for the Vs30 model'  # noqa: ISC003
                 + " but the provided Site File doesn't contain a column named 'Vs30'."
+ '\nNote: the User-specified Vs30 model is only supported for Scattering Locations site definition.'
)
- tmp = selected_stn.iloc[
+ tmp = selected_stn.iloc[ # noqa: PD011
:, list(selected_stn.keys()).index(vs30_label)
].values.tolist()
if len(tmp):
@@ -234,49 +234,49 @@ def create_stations(
else:
vs30_tag = 0
if len(nan_loc) and vs30_tag == 1:
- print('CreateStation: Interpolating global Vs30 map for defined stations.')
+ print('CreateStation: Interpolating global Vs30 map for defined stations.') # noqa: T201
selected_stn.loc[nan_loc, vs30_label] = get_vs30_global(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
- if len(nan_loc) and vs30_tag == 2:
- print('CreateStation: Interpolating Thompson Vs30 map for defined stations.')
+ if len(nan_loc) and vs30_tag == 2: # noqa: PLR2004
+ print('CreateStation: Interpolating Thompson Vs30 map for defined stations.') # noqa: T201
selected_stn.loc[nan_loc, vs30_label] = get_vs30_thompson(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
- if len(nan_loc) and vs30_tag == 3:
- print('CreateStation: Fetch National Crustal Model Vs for defined stations.')
+ if len(nan_loc) and vs30_tag == 3: # noqa: PLR2004
+        print('CreateStation: Fetching National Crustal Model Vs for defined stations.')  # noqa: T201
selected_stn.loc[nan_loc, vs30_label] = get_vs30_ncm(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
if len(nan_loc) and vs30_tag == 0:
- print('CreateStation: Fetch OpenSHA Vs30 map for defined stations.')
+        print('CreateStation: Fetching OpenSHA Vs30 map for defined stations.')  # noqa: T201
selected_stn.loc[nan_loc, vs30_label] = get_site_vs30_from_opensha(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
# Get zTR
- if zTR_label in selected_stn.keys():
- tmp = selected_stn.iloc[
+ if zTR_label in selected_stn.keys(): # noqa: SIM118
+ tmp = selected_stn.iloc[ # noqa: PD011
:, list(selected_stn.keys()).index(zTR_label)
].values.tolist()
if len(tmp):
@@ -286,37 +286,37 @@ def create_stations(
else:
nan_loc = list(range(len(selected_stn.index)))
if len(nan_loc) and zTR_tag == 0:
- print(
+ print( # noqa: T201
'CreateStation: Interpolating global depth to rock map for defined stations.'
)
selected_stn.loc[nan_loc, zTR_label] = [
max(0, x)
for x in get_zTR_global(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
]
elif len(nan_loc) and zTR_tag == 1:
- print(
+ print( # noqa: T201
'CreateStation: Interpolating depth to rock map from National Crustal Model.'
)
selected_stn.loc[nan_loc, zTR_label] = [
max(0, x)
for x in get_zTR_ncm(
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lat_label)
].values.tolist(),
- selected_stn.iloc[
+ selected_stn.iloc[ # noqa: PD011
nan_loc, list(selected_stn.keys()).index(lon_label)
].values.tolist(),
)
]
elif len(nan_loc):
- print(
+ print( # noqa: T201
                'CreateStation: Defaulting depth to rock to zero for sites missing the data.'
)
selected_stn[zTR_label] = [0.0 for x in range(len(selected_stn.index))]
@@ -338,20 +338,20 @@ def create_stations(
# get soil model
if soil_flag:
# get soil_model
- soil_model = selected_stn.iloc[
+ soil_model = selected_stn.iloc[ # noqa: PD011
:, list(selected_stn.keys()).index('Model')
].values.tolist()
        # elastic isotropic model
- row_EI = [i for i, x in enumerate(soil_model) if x == 'EI']
+ row_EI = [i for i, x in enumerate(soil_model) if x == 'EI'] # noqa: N806
        # Borja & Amies model
- row_BA = [i for i, x in enumerate(soil_model) if x == 'BA']
+ row_BA = [i for i, x in enumerate(soil_model) if x == 'BA'] # noqa: N806
# User-defined model
- row_USER = [i for i, x in enumerate(soil_model) if x == 'USER']
+ row_USER = [i for i, x in enumerate(soil_model) if x == 'USER'] # noqa: N806
if len(row_EI):
cur_param_list = ['Den']
for cur_param in cur_param_list:
- if cur_param in selected_stn.keys():
- tmp = selected_stn.iloc[
+ if cur_param in selected_stn.keys(): # noqa: SIM118
+ tmp = selected_stn.iloc[ # noqa: PD011
row_EI, list(selected_stn.keys()).index(cur_param)
].values.tolist()
if len(tmp):
@@ -369,8 +369,8 @@ def create_stations(
if len(row_BA):
cur_param_list = ['Su_rat', 'Den', 'h/G', 'm', 'h0', 'chi']
for cur_param in cur_param_list:
- if cur_param in selected_stn.keys():
- tmp = selected_stn.iloc[
+ if cur_param in selected_stn.keys(): # noqa: SIM118
+ tmp = selected_stn.iloc[ # noqa: PD011
row_BA, list(selected_stn.keys()).index(cur_param)
].values.tolist()
if len(tmp):
@@ -388,7 +388,7 @@ def create_stations(
user_param_list = []
if len(row_USER):
if soil_user_fun is None:
- print(
+ print( # noqa: T201
                'CreateStation: no parameter fetching is conducted for the User soil model - please ensure all needed parameters are defined.'
)
for cur_param in list(selected_stn.keys()):
@@ -407,7 +407,7 @@ def create_stations(
'h0',
'chi',
]:
- user_param_list.append(cur_param)
+ user_param_list.append(cur_param) # noqa: PERF401
else:
selected_stn = get_soil_model_user(selected_stn, soil_user_fun)
user_param_list = list(selected_stn.keys())
@@ -473,7 +473,7 @@ def create_stations(
z1_tag = z1Config['z1_tag']
if z1_tag == 1:
tmp.update({'z1pt0': get_z1(tmp['Vs30'])})
- elif z1_tag == 2:
+ elif z1_tag == 2: # noqa: PLR2004
z1pt0 = get_site_z1pt0_from_opensha(
tmp['Latitude'], tmp['Longitude']
)
@@ -494,7 +494,7 @@ def create_stations(
z25_tag = z25Config['z25_tag']
if z25_tag == 1:
tmp.update({'z2pt5': get_z25(tmp['z1pt0'])})
- elif z25_tag == 2:
+ elif z25_tag == 2: # noqa: PLR2004
z2pt5 = get_site_z2pt5_from_opensha(
tmp['Latitude'], tmp['Longitude']
)
@@ -513,7 +513,7 @@ def create_stations(
if soil_flag:
tmp.update({'Model': stn.get('Model', 'EI')})
- for cur_param in [
+ for cur_param in [ # noqa: RUF005
'Su_rat',
'Den',
'h/G',
@@ -526,7 +526,7 @@ def create_stations(
if stn.get('vsInferred'):
if stn.get('vsInferred') not in [0, 1]:
sys.exit(
- "CreateStation: Only '0' or '1' can be assigned to the"
+ "CreateStation: Only '0' or '1' can be assigned to the" # noqa: ISC003
+ " 'vsInferred' column in the Site File (.csv), where 0 stands for false and 1 stands for true."
)
# print(f"CreateStation: A value of 'vsInferred' is provided for station {stn_id} in the Site File (.csv)"+
@@ -580,11 +580,11 @@ def create_stations(
def create_gridded_stations(
input_file,
- output_file,
+ output_file, # noqa: ARG001
div_lon=2,
div_lat=2,
delta_lon=None,
- delta=None,
+ delta=None, # noqa: ARG001
):
"""Reading input csv file for the grid, generating stations, and saving data
to output json file
@@ -597,19 +597,19 @@ def create_gridded_stations(
delta_lat: delta degree along latitude
Output:
run_tag: 0 - success, 1 - input failure, 2 - output failure
- """
+ """ # noqa: D205, D400, D401
# Reading csv data
run_tag = 0
try:
gstn_df = pd.read_csv(input_file, header=0, index_col=0)
- except:
+ except: # noqa: E722
run_tag = 1
- return run_tag
- if np.max(gstn_df.index.values) != 2:
+ return run_tag # noqa: RET504
+ if np.max(gstn_df.index.values) != 2: # noqa: PLR2004
run_tag = 1
- return run_tag
- else:
- labels = gstn_df.columns.values
+ return run_tag # noqa: RET504
+ else: # noqa: RET505
+ labels = gstn_df.columns.values # noqa: PD011
lon_label, labels = get_label(
['Longitude', 'longitude', 'lon', 'Lon'], labels, 'longitude'
)
@@ -618,7 +618,7 @@ def create_gridded_stations(
)
lon_temp = []
lat_temp = []
- for gstn_id, gstn in gstn_df.iterrows():
+ for gstn_id, gstn in gstn_df.iterrows(): # noqa: B007
lon_temp.append(gstn[lon_label])
lat_temp.append(gstn[lat_label])
# Generating the grid
@@ -626,9 +626,9 @@ def create_gridded_stations(
dlat = (np.max(lat_temp) - np.min(lat_temp)) / div_lat
if delta_lon is not None:
delta_lon = np.min([delta_lon, dlon])
- if delta_lat is not None:
- delta_lat = np.min([delta_lat, dlat])
- glon, glat = np.meshgrid(
+ if delta_lat is not None: # noqa: F821
+ delta_lat = np.min([delta_lat, dlat]) # noqa: F821
+ glon, glat = np.meshgrid( # noqa: RET503
np.arange(np.min(lon_temp), np.max(lon_temp), delta_lon),
np.arange(np.min(lat_temp), np.max(lat_temp), delta_lat),
)
@@ -641,23 +641,23 @@ def get_vs30_global(lat, lon):
lon: list of longitude
Output:
vs30: list of vs30
- """
+ """ # noqa: D205, D400
import os
import pickle
from scipy import interpolate
# Loading global Vs30 data
- cwd = os.path.dirname(os.path.realpath(__file__))
- with open(cwd + '/database/site/global_vs30_4km.pkl', 'rb') as f:
- vs30_global = pickle.load(f)
+ cwd = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
+ with open(cwd + '/database/site/global_vs30_4km.pkl', 'rb') as f: # noqa: PTH123
+ vs30_global = pickle.load(f) # noqa: S301
# Interpolation function (linear)
- interpFunc = interpolate.interp2d(
+ interpFunc = interpolate.interp2d( # noqa: N806
vs30_global['Longitude'], vs30_global['Latitude'], vs30_global['Vs30']
)
vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)]
# return
- return vs30
+ return vs30 # noqa: RET504
def get_vs30_thompson(lat, lon):
@@ -667,78 +667,78 @@ def get_vs30_thompson(lat, lon):
lon: list of longitude
Output:
vs30: list of vs30
- """
+ """ # noqa: D205, D400
import os
import pickle
from scipy import interpolate
# Loading Thompson Vs30 data
- cwd = os.path.dirname(os.path.realpath(__file__))
- with open(cwd + '/database/site/thompson_vs30_4km.pkl', 'rb') as f:
- vs30_thompson = pickle.load(f)
+ cwd = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
+ with open(cwd + '/database/site/thompson_vs30_4km.pkl', 'rb') as f: # noqa: PTH123
+ vs30_thompson = pickle.load(f) # noqa: S301
# Interpolation function (linear)
    # Thompson's map gives zero values for water-covered regions and areas outside CA -> use 760 m/s as the default
- print(
+ print( # noqa: T201
        'CreateStation: Warning - approximating Vs30 as 760 m/s for sites not covered by the Thompson Vs30 map (water/outside CA).'
)
- vs30_thompson['Vs30'][vs30_thompson['Vs30'] < 0.1] = 760
- interpFunc = interpolate.interp2d(
+ vs30_thompson['Vs30'][vs30_thompson['Vs30'] < 0.1] = 760 # noqa: PLR2004
+ interpFunc = interpolate.interp2d( # noqa: N806
vs30_thompson['Longitude'], vs30_thompson['Latitude'], vs30_thompson['Vs30']
)
vs30 = [float(interpFunc(x, y)) for x, y in zip(lon, lat)]
# return
- return vs30
+ return vs30 # noqa: RET504
def get_z1(vs30):
- """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)"""
+ """Compute z1 based on the prediction equation by Chiou and Youngs (2013) (unit of vs30 is meter/second and z1 is meter)""" # noqa: D400
z1 = np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4)))
# return
- return z1
+ return z1 # noqa: RET504
def get_z25(z1):
- """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)"""
+ """Compute z25 based on the prediction equation by Campbell and Bozorgnia (2013)""" # noqa: D400
z25 = 0.748 + 2.218 * z1
# return
- return z25
+ return z25 # noqa: RET504
-def get_z25fromVs(vs):
+def get_z25fromVs(vs): # noqa: N802
"""Compute z25 (m) based on the prediction equation 33 by Campbell and Bozorgnia (2014)
Vs is m/s
- """
+ """ # noqa: D205, D400
z25 = (7.089 - 1.144 * np.log(vs)) * 1000
# return
- return z25
+ return z25 # noqa: RET504
-def get_zTR_global(lat, lon):
+def get_zTR_global(lat, lon): # noqa: N802
"""Interpolate depth to rock at given latitude and longitude
Input:
lat: list of latitude
lon: list of longitude
Output:
zTR: list of zTR
- """
+ """ # noqa: D205, D400
import os
import pickle
from scipy import interpolate
# Loading depth to rock data
- cwd = os.path.dirname(os.path.realpath(__file__))
- with open(cwd + '/database/site/global_zTR_4km.pkl', 'rb') as f:
- zTR_global = pickle.load(f)
+ cwd = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
+ with open(cwd + '/database/site/global_zTR_4km.pkl', 'rb') as f: # noqa: PTH123
+ zTR_global = pickle.load(f) # noqa: S301, N806
# Interpolation function (linear)
- interpFunc = interpolate.interp2d(
+ interpFunc = interpolate.interp2d( # noqa: N806
zTR_global['Longitude'], zTR_global['Latitude'], zTR_global['zTR']
)
- zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)]
+ zTR = [float(interpFunc(x, y)) for x, y in zip(lon, lat)] # noqa: N806
# return
- return zTR
+ return zTR # noqa: RET504
def export_site_prop(stn_file, output_dir, filename):
@@ -749,33 +749,33 @@ def export_site_prop(stn_file, output_dir, filename):
filename: output filename
Output:
run_tag: 0 - success, 1 - output failure
- """
+ """ # noqa: D205, D400, D401
import os
from pathlib import Path
- print(stn_file)
+ print(stn_file) # noqa: T201
station_name = ['site' + str(j) + '.csv' for j in range(len(stn_file))]
lat = [stn_file[j]['Latitude'] for j in range(len(stn_file))]
lon = [stn_file[j]['Longitude'] for j in range(len(stn_file))]
vs30 = [stn_file[j]['Vs30'] for j in range(len(stn_file))]
- df = pd.DataFrame(
+ df = pd.DataFrame( # noqa: PD901
{'GP_file': station_name, 'Longitude': lon, 'Latitude': lat, 'Vs30': vs30}
)
- df = pd.DataFrame.from_dict(stn_file)
+ df = pd.DataFrame.from_dict(stn_file) # noqa: PD901
- output_dir = os.path.join(
- os.path.dirname(Path(output_dir)),
- os.path.basename(Path(output_dir)),
+ output_dir = os.path.join( # noqa: PTH118
+ os.path.dirname(Path(output_dir)), # noqa: PTH120
+ os.path.basename(Path(output_dir)), # noqa: PTH119
)
try:
- os.makedirs(output_dir)
- except:
- print('HazardSimulation: output folder already exists.')
+ os.makedirs(output_dir) # noqa: PTH103
+ except: # noqa: E722
+ print('HazardSimulation: output folder already exists.') # noqa: T201
# save the csv
- df.to_csv(os.path.join(output_dir, filename), index=False)
+ df.to_csv(os.path.join(output_dir, filename), index=False) # noqa: PTH118
-def get_zTR_ncm(lat, lon):
+def get_zTR_ncm(lat, lon): # noqa: N802
"""Call USGS National Crustal Model services for zTR
https://earthquake.usgs.gov/nshmp/ncm
Input:
@@ -783,26 +783,26 @@ def get_zTR_ncm(lat, lon):
lon: list of longitude
Output:
zTR: list of depth to bedrock
- """
+ """ # noqa: D205, D400
import requests
- zTR = []
+ zTR = [] # noqa: N806
# Looping over sites
for cur_lat, cur_lon in zip(lat, lon):
url_geology = f'https://earthquake.usgs.gov/ws/nshmp/ncm/ws/nshmp/ncm/geologic-framework?location={cur_lat}%2C{cur_lon}'
# geological data (depth to bedrock)
- r1 = requests.get(url_geology)
+ r1 = requests.get(url_geology) # noqa: S113
cur_res = r1.json()
if not cur_res['response']['results'][0]['profiles']:
# the current site is out of the available range of NCM (Western US only, 06/2021)
# just append 0.0 to zTR
- print(
+ print( # noqa: T201
                f'CreateStation: Warning in NCM API call - could not get the site geological data; approximating zTR as 0.0 for site {cur_lat}, {cur_lon}'
)
zTR.append(0.0)
continue
- else:
+ else: # noqa: RET507
# get the top bedrock data
zTR.append(abs(cur_res['response']['results'][0]['profiles'][0]['top']))
# return
@@ -818,26 +818,26 @@ def get_vsp_ncm(lat, lon, depth):
depth: [depthMin, depthInc, depthMax]
Output:
vsp: list of shear-wave velocity profile
- """
+ """ # noqa: D205, D400
import requests
vsp = []
- depthMin, depthInc, depthMax = (abs(x) for x in depth)
+ depthMin, depthInc, depthMax = (abs(x) for x in depth) # noqa: N806
# Looping over sites
for cur_lat, cur_lon in zip(lat, lon):
url_geophys = f'https://earthquake.usgs.gov/ws/nshmp/ncm/ws/nshmp/ncm/geophysical?location={cur_lat}%2C{cur_lon}&depths={depthMin}%2C{depthInc}%2C{depthMax}'
- r1 = requests.get(url_geophys)
+ r1 = requests.get(url_geophys) # noqa: S113
cur_res = r1.json()
if cur_res['status'] == 'error':
# the current site is out of the available range of NCM (Western US only, 06/2021)
            # just append an empty list to vsp
- print(
+ print( # noqa: T201
                'CreateStation: Warning in NCM API call - could not get the site geophysical data.'
)
vsp.append([])
continue
- else:
+ else: # noqa: RET507
# get vs30 profile
vsp.append(
[abs(x) for x in cur_res['response']['results'][0]['profile']['vs']]
@@ -855,15 +855,15 @@ def compute_vs30_from_vsp(depthp, vsp):
vsp: Vs profile
Output:
vs30p: average VS for the upper 30-m depth
- """
+ """ # noqa: D205, D400
# Computing the depth interval
- delta_depth = np.diff([0] + depthp)
+ delta_depth = np.diff([0] + depthp) # noqa: RUF005
# Computing the wave-travel time
delta_t = [x / y for x, y in zip(delta_depth, vsp)]
# Computing the Vs30
vs30p = 30.0 / np.sum(delta_t)
# return
- return vs30p
+ return vs30p # noqa: RET504
def get_vs30_ncm(lat, lon):
@@ -873,7 +873,7 @@ def get_vs30_ncm(lat, lon):
lon: list of longitude
Output:
vs30: list of vs30
- """
+ """ # noqa: D205, D400
# Depth list (in meter)
depth = [1.0, 1.0, 30.0]
depthp = np.arange(depth[0], depth[2] + 1.0, depth[1])
@@ -885,7 +885,7 @@ def get_vs30_ncm(lat, lon):
if cur_vsp:
vs30.append(compute_vs30_from_vsp(depthp, cur_vsp))
else:
- print(
+ print( # noqa: T201
                'CreateStation: Warning - approximating Vs30 as 760 m/s for sites not covered by NCM (Western US only).'
)
vs30.append(760.0)
@@ -897,10 +897,10 @@ def get_soil_model_ba(param=None):
"""Get modeling parameters for Borja and Amies 1994 J2 model
Currently just assign default values
Can be extended to have input soil properties to predict this parameters
- """
+ """ # noqa: D205, D400
su_rat = 0.26
density = 2.0
- h_to_G = 1.0
+ h_to_G = 1.0 # noqa: N806
m = 1.0
h0 = 0.2
chi = 0.0
@@ -927,7 +927,7 @@ def get_soil_model_ei(param=None):
"""Get modeling parameters for elastic isotropic
Currently just assign default values
Can be extended to have input soil properties to predict this parameter
- """
+ """ # noqa: D205, D400
density = 2.0
if param == 'Den':
@@ -938,14 +938,14 @@ def get_soil_model_ei(param=None):
return res
-def get_soil_model_user(df_stn, model_fun):
+def get_soil_model_user(df_stn, model_fun): # noqa: D103
# check if mode_fun exists
import importlib
import os
import sys
- if not os.path.isfile(model_fun):
- print(f'CreateStation.get_soil_model_user: {model_fun} is not found.')
+ if not os.path.isfile(model_fun): # noqa: PTH113
+ print(f'CreateStation.get_soil_model_user: {model_fun} is not found.') # noqa: T201
return df_stn, []
# try to load the model file
@@ -958,15 +958,15 @@ def get_soil_model_user(df_stn, model_fun):
user_model = importlib.__import__(
path_model_fun.name[:-3], globals(), locals(), [], 0
)
- except:
- print(f'CreateStation.get_soil_model_user: {model_fun} cannot be loaded.')
+ except: # noqa: E722
+ print(f'CreateStation.get_soil_model_user: {model_fun} cannot be loaded.') # noqa: T201
return df_stn
# try to load the standard function: soil_model_fun(site_info=None)
try:
soil_model = user_model.soil_model
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
            f'CreateStation.get_soil_model_user: soil_model is not found in {model_fun}.'
)
return df_stn
@@ -974,8 +974,8 @@ def get_soil_model_user(df_stn, model_fun):
# get the parameters from soil_model_fun
try:
df_stn_new = soil_model(site_info=df_stn)
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
'CreateStation.get_soil_model_user: error in soil_model_fun(site_info=None).'
)
return df_stn
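Note: the site-parameter helpers above chain together: compute_vs30_from_vsp time-averages a velocity profile over the top 30 m, get_z1 maps Vs30 to the z1.0 basin-depth proxy (Chiou & Youngs), and get_z25 maps z1.0 onward (Campbell & Bozorgnia). A self-contained sketch of that chain with a hypothetical linear profile; unit conventions follow the functions above:

import numpy as np

def vs30_from_profile(depths_m, vs_mps):
    # Travel-time average over the top 30 m: vs30 = 30 / sum(dz / vs).
    dz = np.diff([0.0, *depths_m])
    return 30.0 / float(np.sum(dz / np.asarray(vs_mps)))

def z1_from_vs30(vs30):
    # Chiou & Youngs relation, as coded in get_z1 above.
    return np.exp(-7.15 / 4.0 * np.log((vs30**4 + 571.0**4) / (1360.0**4 + 571.0**4)))

depths = np.arange(1.0, 31.0)          # 1 m layers down to 30 m
vs = np.linspace(200.0, 500.0, 30)     # hypothetical stiffening profile
vs30 = vs30_from_profile(depths, vs)
z1 = z1_from_vs30(vs30)
print(vs30, z1, 0.748 + 2.218 * z1)    # z2.5 via get_z25's relation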
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py
index 283e797cc..daf1d1dba 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenQuake.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -53,9 +53,9 @@
default_oq_version = '3.17.1'
-def openquake_config(site_info, scen_info, event_info, workDir):
- dir_input = os.path.join(workDir, 'Input')
- dir_output = os.path.join(workDir, 'Output')
+def openquake_config(site_info, scen_info, event_info, workDir): # noqa: C901, N803, D103, PLR0912, PLR0915
+ dir_input = os.path.join(workDir, 'Input') # noqa: PTH118
+ dir_output = os.path.join(workDir, 'Output') # noqa: PTH118
import configparser
cfg = configparser.ConfigParser()
@@ -87,17 +87,17 @@ def openquake_config(site_info, scen_info, event_info, workDir):
]:
filename_ini = scen_info['EqRupture'].get('ConfigFile', None)
if filename_ini is None:
- print(
+ print( # noqa: T201
"FetchOpenQuake: please specify Scenario['EqRupture']['ConfigFile']."
)
return 0
- else:
- filename_ini = os.path.join(dir_input, filename_ini)
+ else: # noqa: RET505
+ filename_ini = os.path.join(dir_input, filename_ini) # noqa: PTH118
# updating the export_dir
cfg.read(filename_ini)
cfg['output']['export_dir'] = dir_output
else:
- print(
+ print( # noqa: T201
"FetchOpenQuake: please specify Scenario['Generator'], options: OpenQuakeScenario, OpenQuakeEventBased, OpenQuakeClassicalPSHA, or OpenQuakeUserConfig."
)
return 0
@@ -107,14 +107,14 @@ def openquake_config(site_info, scen_info, event_info, workDir):
'OpenQuakeClassicalPSHA-User',
]:
# sites
- tmpSites = pd.read_csv(
- os.path.join(dir_input, site_info['input_file']),
+ tmpSites = pd.read_csv( # noqa: N806
+ os.path.join(dir_input, site_info['input_file']), # noqa: PTH118
header=0,
index_col=0,
)
- tmpSitesLoc = tmpSites.loc[:, ['Longitude', 'Latitude']]
+ tmpSitesLoc = tmpSites.loc[:, ['Longitude', 'Latitude']] # noqa: N806
tmpSitesLoc.loc[site_info['min_ID'] : site_info['max_ID']].to_csv(
- os.path.join(dir_input, 'sites_oq.csv'),
+ os.path.join(dir_input, 'sites_oq.csv'), # noqa: PTH118
header=False,
index=False,
)
@@ -128,8 +128,8 @@ def openquake_config(site_info, scen_info, event_info, workDir):
cfg['site_params'] = {'site_model_file': site_info['output_file']}
# copy that file to the rundir
shutil.copy(
- os.path.join(dir_input, site_info['output_file']),
- os.path.join(dir_output, site_info['output_file']),
+ os.path.join(dir_input, site_info['output_file']), # noqa: PTH118
+ os.path.join(dir_output, site_info['output_file']), # noqa: PTH118
)
# im type and period
@@ -141,7 +141,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
if jj % 2:
tmp.append(cur_tmp)
im_type = []
- tmp_T = []
+ tmp_T = [] # noqa: N806
for cur_tmp in tmp:
if 'PGA' in cur_tmp:
im_type = 'PGA'
@@ -152,10 +152,10 @@ def openquake_config(site_info, scen_info, event_info, workDir):
pass
event_info['IntensityMeasure']['Type'] = im_type
event_info['IntensityMeasure']['Periods'] = tmp_T
- cfg['calculation']['source_model_logic_tree_file'] = os.path.join(
+ cfg['calculation']['source_model_logic_tree_file'] = os.path.join( # noqa: PTH118
cfg['calculation'].get('source_model_logic_tree_file')
)
- cfg['calculation']['gsim_logic_tree_file'] = os.path.join(
+ cfg['calculation']['gsim_logic_tree_file'] = os.path.join( # noqa: PTH118
cfg['calculation'].get('gsim_logic_tree_file')
)
else:
@@ -164,7 +164,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
# tmpSitesLoc = tmpSites.loc[:, ['Longitude','Latitude']]
# tmpSitesLoc.to_csv(os.path.join(dir_input, 'sites_oq.csv'), header=False, index=False)
# cfg['geometry'] = {'sites_csv': 'sites_oq.csv'}
- cfg['geometry'] = {'sites_csv': os.path.basename(site_info['siteFile'])}
+ cfg['geometry'] = {'sites_csv': os.path.basename(site_info['siteFile'])} # noqa: PTH119
# rupture
cfg['erf'] = {
'rupture_mesh_spacing': scen_info['EqRupture'].get('RupMesh', 2.0),
@@ -176,7 +176,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
# site_params (saved in the output_file)
cfg['site_params'] = {'site_model_file': 'tmp_oq_site_model.csv'}
# hazard_calculation
- mapGMPE = {
+ mapGMPE = { # noqa: N806
'Abrahamson, Silva & Kamai (2014)': 'AbrahamsonEtAl2014',
'AbrahamsonEtAl2014': 'AbrahamsonEtAl2014',
'Boore, Stewart, Seyhan & Atkinson (2014)': 'BooreEtAl2014',
@@ -190,7 +190,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
if scen_info['EqRupture']['Type'] == 'oqSourceXML': # OpenQuakeScenario
imt = ''
if event_info['IntensityMeasure']['Type'] == 'SA':
- for curT in event_info['IntensityMeasure']['Periods']:
+ for curT in event_info['IntensityMeasure']['Periods']: # noqa: N806
imt = imt + 'SA(' + str(curT) + '), '
imt = imt[:-2]
else:
@@ -213,7 +213,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
)
imt_scale = event_info['IntensityMeasure'].get('Scale', 'Log')
if event_info['IntensityMeasure']['Type'] == 'SA':
- for curT in event_info['IntensityMeasure']['Periods']:
+ for curT in event_info['IntensityMeasure']['Periods']: # noqa: N806
# imt = imt + '"SA(' + str(curT) + ')": {}, '.format(imt_levels)
if imt_scale == 'Log':
imt = (
@@ -271,7 +271,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
)
imt_scale = event_info['IntensityMeasure'].get('Scale', 'Log')
if event_info['IntensityMeasure']['Type'] == 'SA':
- for curT in event_info['IntensityMeasure']['Periods']:
+ for curT in event_info['IntensityMeasure']['Periods']: # noqa: N806
# imt = imt + '"SA(' + str(curT) + ')": {}, '.format(imt_levels)
if imt_scale == 'Log':
imt = (
@@ -320,7 +320,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
),
'maximum_distance': scen_info['EqRupture'].get('max_Dist', 500.0),
}
- cfg_quan = ''
+ cfg_quan = '' # noqa: F841
cfg['output'] = {
'export_dir': dir_output,
'individual_curves': scen_info['EqRupture'].get(
@@ -348,42 +348,42 @@ def openquake_config(site_info, scen_info, event_info, workDir):
),
}
else:
- print(
+ print( # noqa: T201
"FetchOpenQuake: please specify Scenario['Generator'], options: OpenQuakeScenario, OpenQuakeEventBased, OpenQuakeClassicalPSHA, or OpenQuakeUserConfig."
)
return 0
# Write the ini
- filename_ini = os.path.join(dir_input, 'oq_job.ini')
- with open(filename_ini, 'w') as configfile:
+ filename_ini = os.path.join(dir_input, 'oq_job.ini') # noqa: PTH118
+ with open(filename_ini, 'w') as configfile: # noqa: PTH123
cfg.write(configfile)
# openquake module
oq_ver_loaded = None
try:
from importlib_metadata import version
- except:
+ except: # noqa: E722
from importlib.metadata import version
if scen_info['EqRupture'].get('OQLocal', None):
# using user-specific local OQ
# first to validate the path
- if not os.path.isdir(scen_info['EqRupture'].get('OQLocal')):
- print(
+ if not os.path.isdir(scen_info['EqRupture'].get('OQLocal')): # noqa: PTH112
+ print( # noqa: T201
'FetchOpenQuake: Local OpenQuake instance {} not found.'.format(
scen_info['EqRupture'].get('OQLocal')
)
)
return 0
- else:
+ else: # noqa: RET505
# getting version
try:
oq_ver = version('openquake.engine')
if oq_ver:
- print(
+ print( # noqa: T201
f'FetchOpenQuake: Removing previous installation of OpenQuake {oq_ver}.'
)
sys.modules.pop('openquake')
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[
sys.executable,
'-m',
@@ -393,18 +393,18 @@ def openquake_config(site_info, scen_info, event_info, workDir):
'openquake.engine',
]
)
- except:
+ except: # noqa: E722
# no installed OQ python package
# do nothing
- print(
+ print( # noqa: T201
'FetchOpenQuake: No previous installation of OpenQuake python package found.'
)
# load the local OQ
try:
- print('FetchOpenQuake: Setting up the user-specified local OQ.')
+ print('FetchOpenQuake: Setting up the user-specified local OQ.') # noqa: T201
sys.path.insert(
0,
- os.path.dirname(scen_info['EqRupture'].get('OQLocal')),
+ os.path.dirname(scen_info['EqRupture'].get('OQLocal')), # noqa: PTH120
)
# owd = os.getcwd()
# os.chdir(os.path.dirname(scen_info['EqRupture'].get('OQLocal')))
@@ -415,8 +415,8 @@ def openquake_config(site_info, scen_info, event_info, workDir):
oq_ver_loaded = baselib.__version__
# sys.modules.pop('openquake')
# os.chdir(owd)
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
'FetchOpenQuake: {} cannot be loaded.'.format(
scen_info['EqRupture'].get('OQLocal')
)
@@ -427,13 +427,13 @@ def openquake_config(site_info, scen_info, event_info, workDir):
try:
oq_ver = version('openquake.engine')
if oq_ver != scen_info['EqRupture'].get('OQVersion', default_oq_version):
- print(
+ print( # noqa: T201
'FetchOpenQuake: Required OpenQuake version not found; installing it now.'
)
if oq_ver:
# pop the old version first
sys.modules.pop('openquake')
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[
sys.executable,
'-m',
@@ -445,7 +445,7 @@ def openquake_config(site_info, scen_info, event_info, workDir):
)
# install the required version
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[
sys.executable,
'-m',
@@ -463,12 +463,12 @@ def openquake_config(site_info, scen_info, event_info, workDir):
else:
oq_ver_loaded = oq_ver
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
'FetchOpenQuake: No OpenQuake installation found; installing it now.'
)
try:
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[
sys.executable,
'-m',
@@ -482,14 +482,14 @@ def openquake_config(site_info, scen_info, event_info, workDir):
]
)
oq_ver_loaded = version('openquake.engine')
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
'FetchOpenQuake: Install of OpenQuake {} failed - please check the version.'.format(
scen_info['EqRupture'].get('OQVersion', default_oq_version)
)
)
- print('FetchOpenQuake: OpenQuake configured.')
+ print('FetchOpenQuake: OpenQuake configured.') # noqa: T201
# return
return filename_ini, oq_ver_loaded, event_info
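+ # NOTE (editorial sketch): cfg follows the configparser.ConfigParser API
+ # (section assignment and cfg.write() above), and the return values feed
+ # the PSHA runner defined below; argument names are taken from this module:
+ #     filename_ini, oq_ver_loaded, event_info = openquake_config(
+ #         site_info, scen_info, event_info, workDir
+ #     )
+ #     oq_run_classical_psha(filename_ini, oq_version=oq_ver_loaded)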
@@ -547,7 +547,7 @@ def get_cfg(job_ini):
"""
-def oq_run_classical_psha(
+def oq_run_classical_psha( # noqa: C901
job_ini,
exports='csv',
oq_version=default_oq_version,
@@ -558,15 +558,15 @@ def oq_run_classical_psha(
:param job_ini:
Path to configuration file/archive or
dictionary of parameters with at least a key "calculation_mode"
- """
+ """ # noqa: D400
# the run() method was made private in v3.11
# get_last_calc_id() and get_datadir() were moved to commonlib.logs in v3.12
# the datastore was moved to commonlib in v3.12
# Note: the method for extracting realizations was kindly shared by Dr. Anne Husley
vtag = int(oq_version.split('.')[1])
- if vtag <= 10:
+ if vtag <= 10: # noqa: PLR2004
try:
- print(f'FetchOpenQuake: running Version {oq_version}.')
+ print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201
# reloading
# run.main([job_ini], exports=exports)
# invoke/modify deeper openquake commands here to make it compatible with
@@ -591,17 +591,17 @@ def oq_run_classical_psha(
concurrent_tasks = None
pdb = None
hc_id = None
- for i in range(1000):
+ for i in range(1000): # noqa: B007
try:
calc_id = logs.init('nojob', getattr(logging, loglevel.upper()))
- except:
+ except: # noqa: PERF203, E722
time.sleep(0.01)
continue
else:
- print('FetchOpenQuake: log created.')
+ print('FetchOpenQuake: log created.') # noqa: T201
break
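+ # NOTE: the loop above is a bounded retry: up to 1000 attempts to obtain
+ # a log record via logs.init(), sleeping 10 ms after each failure and
+ # breaking out of the loop on the first success.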
# disable gzip_input
- base.BaseCalculator.gzip_inputs = lambda self: None
+ base.BaseCalculator.gzip_inputs = lambda self: None # noqa: ARG005
with performance.Monitor('total runtime', measuremem=True) as monitor:
if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
os.environ['OQ_DISTRIBUTE'] = 'processpool'
@@ -611,7 +611,7 @@ def oq_run_classical_psha(
try:
hc_id = calc_ids[hc_id]
except IndexError:
- raise SystemExit(
+ raise SystemExit( # noqa: B904
'There are %d old calculations, cannot '
'retrieve the %s' % (len(calc_ids), hc_id)
)
@@ -625,15 +625,15 @@ def oq_run_classical_psha(
)
calc_id = datastore.get_last_calc_id()
- path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)
+ path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id) # noqa: PTH118
dstore = datastore.read(path)
export_realizations('realizations', dstore)
- except:
- print('FetchOpenQuake: Classical PSHA failed.')
+ except: # noqa: E722
+ print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201
return 1
- elif vtag == 11:
+ elif vtag == 11: # noqa: PLR2004
try:
- print(f'FetchOpenQuake: running Version {oq_version}.')
+ print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201
# reloading
# run.main([job_ini], exports=exports)
# invoke/modify deeper openquake commands here to make it compatible with
@@ -657,17 +657,17 @@ def oq_run_classical_psha(
reuse_input = False
concurrent_tasks = None
pdb = False
- for i in range(1000):
+ for i in range(1000): # noqa: B007
try:
calc_id = logs.init('nojob', getattr(logging, loglevel.upper()))
- except:
+ except: # noqa: PERF203, E722
time.sleep(0.01)
continue
else:
- print('FetchOpenQuake: log created.')
+ print('FetchOpenQuake: log created.') # noqa: T201
break
# disable gzip_input
- base.BaseCalculator.gzip_inputs = lambda self: None
+ base.BaseCalculator.gzip_inputs = lambda self: None # noqa: ARG005
with performance.Monitor('total runtime', measuremem=True) as monitor:
if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
os.environ['OQ_DISTRIBUTE'] = 'processpool'
@@ -680,7 +680,7 @@ def oq_run_classical_psha(
try:
params['hazard_calculation_id'] = str(calc_ids[hc_id])
except IndexError:
- raise SystemExit(
+ raise SystemExit( # noqa: B904
'There are %d old calculations, cannot '
'retrieve the %s' % (len(calc_ids), hc_id)
)
@@ -691,15 +691,15 @@ def oq_run_classical_psha(
calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)
calc_id = datastore.get_last_calc_id()
- path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)
+ path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id) # noqa: PTH118
dstore = datastore.read(path)
export_realizations('realizations', dstore)
- except:
- print('FetchOpenQuake: Classical PSHA failed.')
+ except: # noqa: E722
+ print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201
return 1
else:
try:
- print(f'FetchOpenQuake: running Version {oq_version}.')
+ print(f'FetchOpenQuake: running Version {oq_version}.') # noqa: T201
# reloading
# run.main([job_ini], exports=exports)
# invoke/modify deeper openquake commands here to make it compatible with
@@ -713,25 +713,25 @@ def oq_run_classical_psha(
from openquake.server import dbserver
dbserver.ensure_on()
- global calc_path
+ global calc_path # noqa: PLW0602
loglevel = 'info'
params = {}
reuse_input = False
concurrent_tasks = None
pdb = False
- for i in range(1000):
+ for i in range(1000): # noqa: B007
try:
log = logs.init(
'job', job_ini, getattr(logging, loglevel.upper())
)
- except:
+ except: # noqa: PERF203, E722
time.sleep(0.01)
continue
else:
- print('FetchOpenQuake: log created.')
+ print('FetchOpenQuake: log created.') # noqa: T201
break
log.params.update(params)
- base.BaseCalculator.gzip_inputs = lambda self: None
+ base.BaseCalculator.gzip_inputs = lambda self: None # noqa: ARG005
with log, performance.Monitor(
'total runtime', measuremem=True
) as monitor:
@@ -742,73 +742,73 @@ def oq_run_classical_psha(
logging.info('Total time spent: %s s', monitor.duration)
logging.info('Memory allocated: %s', general.humansize(monitor.mem))
- print('See the output with silx view %s' % calc.datastore.filename)
+ print('See the output with silx view %s' % calc.datastore.filename) # noqa: T201, UP031
calc_id = logs.get_last_calc_id()
- path = os.path.join(logs.get_datadir(), 'calc_%d.hdf5' % calc_id)
+ path = os.path.join(logs.get_datadir(), 'calc_%d.hdf5' % calc_id) # noqa: PTH118
dstore = datastore.read(path)
export_realizations('realizations', dstore)
- except:
- print('FetchOpenQuake: Classical PSHA failed.')
+ except: # noqa: E722
+ print('FetchOpenQuake: Classical PSHA failed.') # noqa: T201
return 1
# h5 clear for stampede2 (this is somewhat inelegant...)
if 'stampede2' in socket.gethostname():
# h5clear
if oq_h5clear(path) == 0:
- print('FetchOpenQuake.oq_run_classical_psha: h5clear completed')
+ print('FetchOpenQuake.oq_run_classical_psha: h5clear completed') # noqa: T201
else:
- print('FetchOpenQuake.oq_run_classical_psha: h5clear failed')
+ print('FetchOpenQuake.oq_run_classical_psha: h5clear failed') # noqa: T201
# copy the calc file to output directory
if dir_info:
dir_output = dir_info['Output']
try:
shutil.copy2(path, dir_output)
- print('FetchOpenQuake: calc hdf file saved.')
- except:
- print('FetchOpenQuake: failed to copy calc hdf file.')
+ print('FetchOpenQuake: calc hdf file saved.') # noqa: T201
+ except: # noqa: E722
+ print('FetchOpenQuake: failed to copy calc hdf file.') # noqa: T201
return 0
-def oq_h5clear(hdf5_file):
+def oq_h5clear(hdf5_file): # noqa: D103
# h5clear = os.path.join(os.path.dirname(os.path.abspath(__file__)),'lib/hdf5/bin/h5clear')
# print(h5clear)
- print(hdf5_file)
+ print(hdf5_file) # noqa: T201
# subprocess.run(["chmod", "a+rx", h5clear])
- subprocess.run(['chmod', 'a+rx', hdf5_file], check=False)
- tmp = subprocess.run(['h5clear', '-s', hdf5_file], check=False)
- print(tmp)
+ subprocess.run(['chmod', 'a+rx', hdf5_file], check=False) # noqa: S603, S607
+ tmp = subprocess.run(['h5clear', '-s', hdf5_file], check=False) # noqa: S603, S607
+ print(tmp) # noqa: T201
run_flag = tmp.returncode
- return run_flag
+ return run_flag # noqa: RET504
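+ # NOTE (editorial sketch): 'h5clear -s' clears the status flags in an
+ # HDF5 file's superblock so that a file left open by a crashed process
+ # can be reopened; a hedged usage sketch with a hypothetical path:
+ #     status = oq_h5clear('/path/to/calc_1.hdf5')  # 0 on success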
def oq_read_uhs_classical_psha(scen_info, event_info, dir_info):
- """Collect the UHS from a classical PSHA by OpenQuake"""
+ """Collect the UHS from a classical PSHA by OpenQuake""" # noqa: D400
import glob
import random
# number of scenario
num_scen = scen_info['Number']
if num_scen > 1:
- print('FetchOpenQuake: currently only supporting a single scenario for PHSA')
+ print('FetchOpenQuake: currently supports only a single scenario for PSHA') # noqa: T201
num_scen = 1
# number of realizations per site
num_rlz = event_info['NumberPerSite']
# directory of the UHS
res_dir = dir_info['Output']
# mean UHS
- cur_uhs_file = glob.glob(os.path.join(res_dir, 'hazard_uhs-mean_*.csv'))[0]
- print(cur_uhs_file)
+ cur_uhs_file = glob.glob(os.path.join(res_dir, 'hazard_uhs-mean_*.csv'))[0] # noqa: PTH118, PTH207
+ print(cur_uhs_file) # noqa: T201
# read csv
tmp = pd.read_csv(cur_uhs_file, skiprows=1)
# number of stations
num_stn = len(tmp.index)
# number of IMs
- num_IMs = len(tmp.columns) - 2
+ num_IMs = len(tmp.columns) - 2 # noqa: N806
# IM list
- list_IMs = tmp.columns.tolist()[2:]
+ list_IMs = tmp.columns.tolist()[2:] # noqa: N806
im_list = [x.split('~')[1] for x in list_IMs]
ln_psa_mr = []
mag_maf = []
@@ -821,23 +821,23 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info):
else:
num_r1 = np.min(
[
- len(glob.glob(os.path.join(res_dir, 'hazard_uhs-rlz-*.csv'))),
+ len(glob.glob(os.path.join(res_dir, 'hazard_uhs-rlz-*.csv'))), # noqa: PTH118, PTH207
num_rlz,
]
)
- for i in range(num_r1):
- cur_uhs_file = glob.glob(
- os.path.join(res_dir, 'hazard_uhs-rlz-*.csv')
+ for i in range(num_r1): # noqa: PLW2901
+ cur_uhs_file = glob.glob( # noqa: PTH207
+ os.path.join(res_dir, 'hazard_uhs-rlz-*.csv') # noqa: PTH118
)[i]
tmp = pd.read_csv(cur_uhs_file, skiprows=1)
ln_psa[:, :, i] = np.log(tmp.iloc[:, 2:])
if num_rlz > num_r1:
# randomly resampling available spectra
- for i in range(num_rlz - num_r1):
+ for i in range(num_rlz - num_r1): # noqa: PLW2901
rnd_tag = random.randrange(num_r1)
- print(int(rnd_tag))
- cur_uhs_file = glob.glob(
- os.path.join(res_dir, 'hazard_uhs-rlz-*.csv')
+ print(int(rnd_tag)) # noqa: T201
+ cur_uhs_file = glob.glob( # noqa: PTH207
+ os.path.join(res_dir, 'hazard_uhs-rlz-*.csv') # noqa: PTH118
)[int(rnd_tag)]
tmp = pd.read_csv(cur_uhs_file, skiprows=1)
ln_psa[:, :, i] = np.log(tmp.iloc[:, 2:])
@@ -848,21 +848,21 @@ def oq_read_uhs_classical_psha(scen_info, event_info, dir_info):
return ln_psa_mr, mag_maf, im_list
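+ # NOTE (editorial sketch): the CSV handling above in condensed form,
+ # assuming OpenQuake's UHS export layout (two leading coordinate columns,
+ # then headers carrying the IM name after a '~'):
+ #     tmp = pd.read_csv(cur_uhs_file, skiprows=1)
+ #     im_list = [x.split('~')[1] for x in tmp.columns.tolist()[2:]]
+ #     ln_psa = np.log(tmp.iloc[:, 2:].to_numpy())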
-class OpenQuakeHazardCalc:
- def __init__(
+class OpenQuakeHazardCalc: # noqa: D101
+ def __init__( # noqa: C901
self,
job_ini,
event_info,
oq_version,
dir_info=None,
- no_distribute=False,
+ no_distribute=False, # noqa: FBT002
):
"""Initialize a calculation (reinvented from openquake.engine.engine)
:param job_ini:
Path to configuration file/archive or
dictionary of parameters with at least a key "calculation_mode"
- """
+ """ # noqa: D400
self.vtag = int(oq_version.split('.')[1])
self.dir_info = dir_info
@@ -871,22 +871,22 @@ def __init__(
)
from openquake.commonlib import logs, readinput
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
from openquake.commonlib import datastore
else:
from openquake.baselib import datastore
from openquake.calculators import base
from openquake.server import dbserver
- user_name = getpass.getuser()
+ user_name = getpass.getuser() # noqa: F841
if no_distribute:
os.environ['OQ_DISTRIBUTE'] = 'no'
# check if the datadir exists
datadir = datastore.get_datadir()
- if not os.path.exists(datadir):
- os.makedirs(datadir)
+ if not os.path.exists(datadir): # noqa: PTH110
+ os.makedirs(datadir) # noqa: PTH103
# dbserver.ensure_on()
if dbserver.get_status() == 'not-running':
@@ -901,9 +901,9 @@ def __init__(
# Here is a trick to activate OpenQuake's dbserver:
# we first cd to the openquake directory and launch a subprocess that starts and holds the dbserver,
# then cd back to the original working directory
- owd = os.getcwd()
- os.chdir(os.path.dirname(os.path.realpath(__file__)))
- self.prc = subprocess.Popen(
+ owd = os.getcwd() # noqa: PTH109
+ os.chdir(os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
+ self.prc = subprocess.Popen( # noqa: S603
[sys.executable, '-m', 'openquake.commands', 'dbserver', 'start']
)
os.chdir(owd)
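+ # NOTE: Popen (rather than check_call) is deliberate here: the dbserver
+ # process must stay alive in the background for the whole calculation;
+ # self.prc is kill()-ed later, once eval_calc() is done with it.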
@@ -931,13 +931,13 @@ def __init__(
# Create a job
# self.job = logs.init("job", job_ini, logging.INFO, None, None, None)
- if self.vtag >= 11:
+ if self.vtag >= 11: # noqa: PLR2004
dic = readinput.get_params(job_ini)
else:
dic = readinput.get_params([job_ini])
# dic['hazard_calculation_id'] = self.job.calc_id
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
# Create the job log
self.log = logs.init('job', dic, logging.INFO, None, None, None)
# Get openquake parameters
@@ -953,20 +953,20 @@ def __init__(
# Create the calculator
self.calculator.from_engine = True
- print('FetchOpenQuake: OpenQuake Hazard Calculator initiated.')
+ print('FetchOpenQuake: OpenQuake Hazard Calculator initiated.') # noqa: T201
- def run_calc(self):
- """Run a calculation and return results (reinvented from openquake.calculators.base)"""
+ def run_calc(self): # noqa: C901
+ """Run a calculation and return results (reinvented from openquake.calculators.base)""" # noqa: D400
from openquake.baselib import config, performance, zeromq
from openquake.calculators import base, getters
- if self.vtag >= 11:
+ if self.vtag >= 11: # noqa: PLR2004
from openquake.baselib import version
else:
from openquake.baselib import __version__ as version
- with self.calculator._monitor:
- self.calculator._monitor.username = ''
+ with self.calculator._monitor: # noqa: SLF001
+ self.calculator._monitor.username = '' # noqa: SLF001
try:
# Pre-execute setups
self.calculator.pre_execute()
@@ -997,12 +997,12 @@ def run_calc(self):
'There is no rupture_model, the calculator will just '
'import data without performing any calculation'
)
- fake = logictree.FullLogicTree.fake()
+ fake = logictree.FullLogicTree.fake() # noqa: F821
dstore['full_lt'] = fake # needed to expose the outputs
dstore['weights'] = [1.0]
return {}
else: # scenario
- self.calculator._read_scenario_ruptures()
+ self.calculator._read_scenario_ruptures() # noqa: SLF001
if (
oq.ground_motion_fields is False
and oq.hazard_curves_from_gmfs is False
@@ -1011,7 +1011,7 @@ def run_calc(self):
# Intensity measure models
if oq.ground_motion_fields:
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
imts = oq.get_primary_imtls()
nrups = len(dstore['ruptures'])
base.create_gmf_data(dstore, imts, oq.get_sec_imts())
@@ -1024,7 +1024,7 @@ def run_calc(self):
(nrups,),
fillvalue=None,
)
- elif self.vtag == 11:
+ elif self.vtag == 11: # noqa: PLR2004
imts = oq.get_primary_imtls()
nrups = len(dstore['ruptures'])
base.create_gmf_data(dstore, len(imts), oq.get_sec_imts())
@@ -1043,13 +1043,13 @@ def run_calc(self):
# Prepare inputs for GmfGetter
nr = len(dstore['ruptures'])
logging.info(f'Reading {nr:_d} ruptures')
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
rgetters = getters.get_rupture_getters(
dstore,
oq.concurrent_tasks * 1.25,
srcfilter=self.calculator.srcfilter,
)
- elif self.vtag == 11:
+ elif self.vtag == 11: # noqa: PLR2004
rgetters = getters.gen_rupture_getters(
dstore, oq.concurrent_tasks
)
@@ -1062,12 +1062,12 @@ def run_calc(self):
mon = performance.Monitor()
mon.version = version
mon.config = config
- rcvr = 'tcp://%s:%s' % (
+ rcvr = 'tcp://%s:%s' % ( # noqa: UP031
config.dbserver.listen,
config.dbserver.receiver_ports,
)
skt = zeromq.Socket(rcvr, zeromq.zmq.PULL, 'bind').__enter__()
- mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port)
+ mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port) # noqa: UP031
mon = mon.new(
operation='total ' + self.calculator.core_task.__func__.__name__,
measuremem=True,
@@ -1081,11 +1081,11 @@ def run_calc(self):
self.dstore = dstore
finally:
- print('FetchOpenQuake: OpenQuake Hazard Calculator defined.')
+ print('FetchOpenQuake: OpenQuake Hazard Calculator defined.') # noqa: T201
# parallel.Starmap.shutdown()
- def eval_calc(self):
- """Evaluate each calculators for different IMs"""
+ def eval_calc(self): # noqa: C901, PLR0912, PLR0915
+ """Evaluate each calculators for different IMs""" # noqa: D400
# Define the GmfGetter
# for args_tag in range(len(self.args)-1):
@@ -1096,9 +1096,9 @@ def eval_calc(self):
from openquake.commands import dbserver as cdbs
from openquake.hazardlib import calc, const, gsim
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
from openquake.hazardlib.const import StdDev
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
from openquake.commonlib import datastore
else:
from openquake.baselib import datastore
@@ -1114,17 +1114,17 @@ def eval_calc(self):
)
# Evaluate each computer
- print('FetchOpenQuake: Evaluating ground motion models.')
+ print('FetchOpenQuake: Evaluating ground motion models.') # noqa: T201
for computer in cur_getter.gen_computers(self.mon):
# Looping over rupture(s) in the current realization
- sids = computer.sids
+ sids = computer.sids # noqa: F841
# print('eval_calc: site ID sids = ')
# print(sids)
eids_by_rlz = computer.ebrupture.get_eids_by_rlz(cur_getter.rlzs_by_gsim)
mag = computer.ebrupture.rupture.mag
im_list = []
- data = general.AccumDict(accum=[])
- cur_T = self.event_info['IntensityMeasure'].get('Periods', None)
+ data = general.AccumDict(accum=[]) # noqa: F841
+ cur_T = self.event_info['IntensityMeasure'].get('Periods', None) # noqa: N806
for cur_gs, rlzs in cur_getter.rlzs_by_gsim.items():
# Looping over GMPE(s)
# print('eval_calc: cur_gs = ')
@@ -1135,11 +1135,11 @@ def eval_calc(self):
# NB: the trick for performance is to keep the call to
# .compute outside of the loop over the realizations;
# it is better to have few calls producing big arrays
- tmpMean = []
+ tmpMean = [] # noqa: N806
tmpstdtot = []
tmpstdinter = []
tmpstdintra = []
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
mean_stds_all = computer.cmaker.get_mean_stds(
[computer.ctx], StdDev.EVENT
)[0]
@@ -1147,19 +1147,19 @@ def eval_calc(self):
# Looping over IM(s)
# print('eval_calc: imt = ', imt)
if str(imt) in ['PGA', 'PGV', 'PGD']:
- cur_T = [0.0]
+ cur_T = [0.0] # noqa: N806
im_list.append(str(imt))
- imTag = 'ln' + str(imt)
+ imTag = 'ln' + str(imt) # noqa: N806
else:
if 'SA' not in im_list:
im_list.append('SA')
- imTag = 'lnSA'
+ imTag = 'lnSA' # noqa: N806
if isinstance(cur_gs, gsim.multi.MultiGMPE):
gs = cur_gs[str(imt)] # MultiGMPE
else:
gs = cur_gs # regular GMPE
try:
- if self.vtag >= 12:
+ if self.vtag >= 12: # noqa: PLR2004
mean_stds = mean_stds_all[:, imti]
num_sids = len(computer.sids)
num_stds = len(mean_stds)
@@ -1167,8 +1167,8 @@ def eval_calc(self):
# no standard deviation is available
# for truncation_level = 0 there is only mean, no stds
if computer.correlation_model:
- raise ValueError(
- 'truncation_level=0 requires '
+ raise ValueError( # noqa: TRY003, TRY301
+ 'truncation_level=0 requires ' # noqa: EM101
'no correlation model'
)
mean = mean_stds[0]
@@ -1176,7 +1176,7 @@ def eval_calc(self):
stddev_inter = 0
stddev_total = 0
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1185,7 +1185,7 @@ def eval_calc(self):
)
tmpstdtot = stddev_total
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=0)
+ tmpMean = np.concatenate((tmpMean, mean), axis=0) # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1195,7 +1195,7 @@ def eval_calc(self):
tmpstdtot = np.concatenate(
(tmpstdtot, stddev_total), axis=0
)
- elif num_stds == 2:
+ elif num_stds == 2: # noqa: PLR2004
# If the GSIM provides only total standard deviation, we need
# to compute mean and total standard deviation at the sites
# of interest.
@@ -1203,19 +1203,19 @@ def eval_calc(self):
# By default, we evaluate stddev_inter as the stddev_total
if computer.correlation_model:
- raise CorrelationButNoInterIntraStdDevs(
+ raise CorrelationButNoInterIntraStdDevs( # noqa: TRY301
computer.correlation_model, cur_gs
)
mean, stddev_total = mean_stds
stddev_total = stddev_total.reshape(
- stddev_total.shape + (1,)
+ stddev_total.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
stddev_inter = stddev_total
stddev_intra = 0
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1224,7 +1224,7 @@ def eval_calc(self):
)
tmpstdtot = stddev_total
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=0)
+ tmpMean = np.concatenate((tmpMean, mean), axis=0) # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1237,14 +1237,14 @@ def eval_calc(self):
else:
mean, stddev_inter, stddev_intra = mean_stds
stddev_intra = stddev_intra.reshape(
- stddev_intra.shape + (1,)
+ stddev_intra.shape + (1,) # noqa: RUF005
)
stddev_inter = stddev_inter.reshape(
- stddev_inter.shape + (1,)
+ stddev_inter.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdinter = stddev_inter
tmpstdintra = stddev_intra
tmpstdtot = np.sqrt(
@@ -1252,7 +1252,7 @@ def eval_calc(self):
+ stddev_intra * stddev_intra
)
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=1)
+ tmpMean = np.concatenate((tmpMean, mean), axis=1) # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1270,13 +1270,13 @@ def eval_calc(self):
axis=1,
)
- elif self.vtag == 11:
+ elif self.vtag == 11: # noqa: PLR2004
# v11
dctx = computer.dctx.roundup(cur_gs.minimum_distance)
if computer.distribution is None:
if computer.correlation_model:
- raise ValueError(
- 'truncation_level=0 requires '
+ raise ValueError( # noqa: TRY003, TRY301
+ 'truncation_level=0 requires ' # noqa: EM101
'no correlation model'
)
mean, _stddevs = cur_gs.get_mean_and_stddevs(
@@ -1295,7 +1295,7 @@ def eval_calc(self):
# of interest.
# In this case, we also assume no correlation model is used.
if computer.correlation_model:
- raise CorrelationButNoInterIntraStdDevs(
+ raise CorrelationButNoInterIntraStdDevs( # noqa: TRY301
computer.correlation_model, cur_gs
)
@@ -1307,14 +1307,14 @@ def eval_calc(self):
[const.StdDev.TOTAL],
)
stddev_total = stddev_total.reshape(
- stddev_total.shape + (1,)
+ stddev_total.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdtot = stddev_total
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=0)
+ tmpMean = np.concatenate((tmpMean, mean), axis=0) # noqa: N806
tmpstdtot = np.concatenate(
(tmpstdtot, stddev_total), axis=0
)
@@ -1332,15 +1332,15 @@ def eval_calc(self):
)
)
stddev_intra = stddev_intra.reshape(
- stddev_intra.shape + (1,)
+ stddev_intra.shape + (1,) # noqa: RUF005
)
stddev_inter = stddev_inter.reshape(
- stddev_inter.shape + (1,)
+ stddev_inter.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdinter = stddev_inter
tmpstdintra = stddev_intra
tmpstdtot = np.sqrt(
@@ -1348,7 +1348,7 @@ def eval_calc(self):
+ stddev_intra * stddev_intra
)
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=1)
+ tmpMean = np.concatenate((tmpMean, mean), axis=1) # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1371,8 +1371,8 @@ def eval_calc(self):
dctx = computer.dctx.roundup(cur_gs.minimum_distance)
if computer.truncation_level == 0:
if computer.correlation_model:
- raise ValueError(
- 'truncation_level=0 requires '
+ raise ValueError( # noqa: TRY003, TRY301
+ 'truncation_level=0 requires ' # noqa: EM101
'no correlation model'
)
mean, _stddevs = cur_gs.get_mean_and_stddevs(
@@ -1382,7 +1382,7 @@ def eval_calc(self):
imt,
stddev_types=[],
)
- num_sids = len(computer.sids)
+ num_sids = len(computer.sids) # noqa: F841
if {
const.StdDev.TOTAL
} == cur_gs.DEFINED_FOR_STANDARD_DEVIATION_TYPES:
@@ -1391,7 +1391,7 @@ def eval_calc(self):
# of interest.
# In this case, we also assume no correlation model is used.
if computer.correlation_model:
- raise CorrelationButNoInterIntraStdDevs(
+ raise CorrelationButNoInterIntraStdDevs( # noqa: TRY301
computer.correlation_model, cur_gs
)
@@ -1403,14 +1403,14 @@ def eval_calc(self):
[const.StdDev.TOTAL],
)
stddev_total = stddev_total.reshape(
- stddev_total.shape + (1,)
+ stddev_total.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdtot = stddev_total
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=0)
+ tmpMean = np.concatenate((tmpMean, mean), axis=0) # noqa: N806
tmpstdtot = np.concatenate(
(tmpstdtot, stddev_total), axis=0
)
@@ -1428,15 +1428,15 @@ def eval_calc(self):
)
)
stddev_intra = stddev_intra.reshape(
- stddev_intra.shape + (1,)
+ stddev_intra.shape + (1,) # noqa: RUF005
)
stddev_inter = stddev_inter.reshape(
- stddev_inter.shape + (1,)
+ stddev_inter.shape + (1,) # noqa: RUF005
)
- mean = mean.reshape(mean.shape + (1,))
+ mean = mean.reshape(mean.shape + (1,)) # noqa: RUF005
if imti == 0:
- tmpMean = mean
+ tmpMean = mean # noqa: N806
tmpstdinter = stddev_inter
tmpstdintra = stddev_intra
tmpstdtot = np.sqrt(
@@ -1444,7 +1444,7 @@ def eval_calc(self):
+ stddev_intra * stddev_intra
)
else:
- tmpMean = np.concatenate((tmpMean, mean), axis=1)
+ tmpMean = np.concatenate((tmpMean, mean), axis=1) # noqa: N806
tmpstdinter = np.concatenate(
(tmpstdinter, stddev_inter), axis=1
)
@@ -1462,9 +1462,9 @@ def eval_calc(self):
axis=1,
)
- except Exception as exc:
- raise RuntimeError(
- '(%s, %s, source_id=%r) %s: %s'
+ except Exception as exc: # noqa: BLE001
+ raise RuntimeError( # noqa: B904
+ '(%s, %s, source_id=%r) %s: %s' # noqa: UP031
% (
gs,
imt,
@@ -1479,7 +1479,7 @@ def eval_calc(self):
gm_collector = []
# collect data
for k in range(tmpMean.shape[0]):
- imResult = {}
+ imResult = {} # noqa: N806
if len(tmpMean):
imResult.update(
{'Mean': [float(x) for x in tmpMean[k].tolist()]}
@@ -1515,7 +1515,7 @@ def eval_calc(self):
self.calculator.datastore.close()
# stop dbserver
- if self.vtag >= 11:
+ if self.vtag >= 11: # noqa: PLR2004
cdbs.main('stop')
else:
cdbs.dbserver('stop')
@@ -1525,11 +1525,11 @@ def eval_calc(self):
self.prc.kill()
# copy calc hdf file
- if self.vtag >= 11:
+ if self.vtag >= 11: # noqa: PLR2004
calc_id = datastore.get_last_calc_id()
- path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)
+ path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id) # noqa: PTH118
else:
- path = os.path.join(
+ path = os.path.join( # noqa: PTH118
datastore.get_datadir(), 'calc_%d.hdf5' % self.calc_id
)
@@ -1537,9 +1537,9 @@ def eval_calc(self):
dir_output = self.dir_info['Output']
try:
shutil.copy2(path, dir_output)
- print('FetchOpenQuake: calc hdf file saved.')
- except:
- print('FetchOpenQuake: failed to copy calc hdf file.')
+ print('FetchOpenQuake: calc hdf file saved.') # noqa: T201
+ except: # noqa: E722
+ print('FetchOpenQuake: failed to copy calc hdf file.') # noqa: T201
# Final results
res = {
@@ -1550,30 +1550,30 @@ def eval_calc(self):
}
# return
- return res
+ return res # noqa: RET504
- def calculator_build_events_from_sources(self):
- """Prefilter the composite source model and store the source_info"""
+ def calculator_build_events_from_sources(self): # noqa: C901
+ """Prefilter the composite source model and store the source_info""" # noqa: D400
gsims_by_trt = self.calculator.csm.full_lt.get_gsims_by_trt()
- print('FetchOpenQuake: self.calculator.csm.src_groups = ')
- print(self.calculator.csm.src_groups)
+ print('FetchOpenQuake: self.calculator.csm.src_groups = ') # noqa: T201
+ print(self.calculator.csm.src_groups) # noqa: T201
sources = self.calculator.csm.get_sources()
- print('FetchOpenQuake: sources = ')
- print(sources)
+ print('FetchOpenQuake: sources = ') # noqa: T201
+ print(sources) # noqa: T201
for src in sources:
src.nsites = 1 # avoid 0 weight
src.num_ruptures = src.count_ruptures()
maxweight = sum(sg.weight for sg in self.calculator.csm.src_groups) / (
self.calculator.oqparam.concurrent_tasks or 1
)
- print('FetchOpenQuake: weights = ')
- print([sg.weight for sg in self.calculator.csm.src_groups])
- print('FetchOpenQuake: maxweight = ')
- print(maxweight)
+ print('FetchOpenQuake: weights = ') # noqa: T201
+ print([sg.weight for sg in self.calculator.csm.src_groups]) # noqa: T201
+ print('FetchOpenQuake: maxweight = ') # noqa: T201
+ print(maxweight) # noqa: T201
# trt => potential ruptures
- eff_ruptures = general.AccumDict(accum=0)
+ eff_ruptures = general.AccumDict(accum=0) # noqa: F821
# nr, ns, dt
- calc_times = general.AccumDict(accum=np.zeros(3, np.float32))
+ calc_times = general.AccumDict(accum=np.zeros(3, np.float32)) # noqa: F821
allargs = []
if self.calculator.oqparam.is_ucerf():
# manage the filtering in a special way
@@ -1581,7 +1581,7 @@ def calculator_build_events_from_sources(self):
for src in sg:
src.src_filter = self.calculator.srcfilter
# otherwise it would be ultra-slow
- srcfilter = calc.filters.nofilter
+ srcfilter = calc.filters.nofilter # noqa: F821
else:
srcfilter = self.calculator.srcfilter
logging.info('Building ruptures')
@@ -1592,25 +1592,25 @@ def calculator_build_events_from_sources(self):
par = self.calculator.param.copy()
par['gsims'] = gsims_by_trt[sg.trt]
for src_group in sg.split(maxweight):
- allargs.append((src_group, srcfilter, par))
+ allargs.append((src_group, srcfilter, par)) # noqa: PERF401
smap = []
for curargs in allargs:
- smap.append(
- calc.stochastic.sample_ruptures(curargs[0], curargs[1], curargs[2])
+ smap.append( # noqa: PERF401
+ calc.stochastic.sample_ruptures(curargs[0], curargs[1], curargs[2]) # noqa: F821
)
- print('smap = ')
- print(smap)
+ print('smap = ') # noqa: T201
+ print(smap) # noqa: T201
self.calculator.nruptures = 0
mon = self.calculator.monitor('saving ruptures')
for tmp in smap:
dic = next(tmp)
- print(dic)
+ print(dic) # noqa: T201
# NB: dic should be a dictionary, but when the calculation dies
# for an OOM it can become None, thus giving a very confusing error
if dic is None:
- raise MemoryError('You ran out of memory!')
+ raise MemoryError('You ran out of memory!') # noqa: EM101, TRY003
rup_array = dic['rup_array']
if len(rup_array) == 0:
continue
@@ -1624,34 +1624,34 @@ def calculator_build_events_from_sources(self):
self.calculator.nruptures, self.calculator.nruptures + n
)
self.calculator.nruptures += n
- hdf5.extend(self.calculator.datastore['ruptures'], rup_array)
- hdf5.extend(self.calculator.datastore['rupgeoms'], rup_array.geom)
+ hdf5.extend(self.calculator.datastore['ruptures'], rup_array) # noqa: F821
+ hdf5.extend(self.calculator.datastore['rupgeoms'], rup_array.geom) # noqa: F821
if len(self.calculator.datastore['ruptures']) == 0:
- raise RuntimeError(
- 'No ruptures were generated, perhaps the '
+ raise RuntimeError( # noqa: TRY003
+ 'No ruptures were generated, perhaps the ' # noqa: EM101
'investigation time is too short'
)
# must be called before storing the events
self.calculator.store_rlz_info(eff_ruptures) # store full_lt
self.calculator.store_source_info(calc_times)
- imp = commonlib.calc.RuptureImporter(self.calculator.datastore)
- print('self.calculator.datastore.getitem(ruptures)')
- print(self.calculator.datastore.getitem('ruptures'))
+ imp = commonlib.calc.RuptureImporter(self.calculator.datastore) # noqa: F821
+ print('self.calculator.datastore.getitem(ruptures)') # noqa: T201
+ print(self.calculator.datastore.getitem('ruptures')) # noqa: T201
with self.calculator.monitor('saving ruptures and events'):
imp.import_rups_events(
self.calculator.datastore.getitem('ruptures')[()],
- getters.get_rupture_getters,
+ getters.get_rupture_getters, # noqa: F821
)
-class CorrelationButNoInterIntraStdDevs(Exception):
+class CorrelationButNoInterIntraStdDevs(Exception): # noqa: N818, D101
def __init__(self, corr, gsim):
self.corr = corr
self.gsim = gsim
- def __str__(self):
+ def __str__(self): # noqa: D105
return (
f'You cannot use the correlation model '
f'{self.corr.__class__.__name__} with the '
@@ -1664,13 +1664,13 @@ def __str__(self):
def to_imt_unit_values(vals, imt):
- """Exponentiate the values unless the IMT is MMI"""
+ """Exponentiate the values unless the IMT is MMI""" # noqa: D400
if str(imt) == 'MMI':
return vals
return np.exp(vals)
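+ # NOTE: means are carried upstream in natural-log space (the 'lnPGA' /
+ # 'lnSA' tags above), hence the exp(); a quick usage check:
+ #     to_imt_unit_values(np.array([0.0]), 'PGA')  # -> array([1.])
+ #     to_imt_unit_values(np.array([5.0]), 'MMI')  # -> array([5.]), unchanged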
-def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
+def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir): # noqa: C901, N803, D103
import json
from openquake.commonlib import readinput
@@ -1682,11 +1682,11 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
from openquake.hazardlib.geo.mesh import Mesh, surface_to_arrays
from openquake.hazardlib.geo.surface.base import BaseSurface
- in_dir = os.path.join(work_dir, 'Input')
- outfile = os.path.join(work_dir, 'Output', 'RupFile.geojson')
+ in_dir = os.path.join(work_dir, 'Input') # noqa: PTH118
+ outfile = os.path.join(work_dir, 'Output', 'RupFile.geojson') # noqa: PTH118
erf_data = {'type': 'FeatureCollection'}
oq = readinput.get_oqparam(
- dict(
+ dict( # noqa: C408
calculation_mode='classical',
inputs={'site_model': [siteFile]},
intensity_measure_types_and_levels="{'PGA': [0.1], 'SA(0.1)': [0.1]}", # placeholder for initiating oqparam. Not used in ERF
@@ -1709,7 +1709,7 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
rupture_mesh_spacing = scenario_info['EqRupture']['rupture_mesh_spacing']
[src_nrml] = nrml.read(
- os.path.join(in_dir, scenario_info['EqRupture']['sourceFile'])
+ os.path.join(in_dir, scenario_info['EqRupture']['sourceFile']) # noqa: PTH118
)
conv = sourceconverter.SourceConverter(
scenario_info['EqRupture']['investigation_time'],
@@ -1723,14 +1723,14 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
sources = []
sources_dist = []
sources_id = []
- id = 0
- siteMeanCol = site.SiteCollection.from_points([mlon], [mlat])
+ id = 0 # noqa: A001
+ siteMeanCol = site.SiteCollection.from_points([mlon], [mlat]) # noqa: N806
srcfilter = SourceFilter(siteMeanCol, oq.maximum_distance)
- minMag = scenario_info['EqRupture']['min_mag']
- maxMag = scenario_info['EqRupture']['max_mag']
+ minMag = scenario_info['EqRupture']['min_mag'] # noqa: N806
+ maxMag = scenario_info['EqRupture']['max_mag'] # noqa: N806
for i in range(len(src_nrml)):
subnode = src_nrml[i]
- subSrc = src_raw[i]
+ subSrc = src_raw[i] # noqa: N806
tag = (
subnode.tag.rsplit('}')[1]
if subnode.tag.startswith('{')
@@ -1739,7 +1739,7 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
if tag == 'sourceGroup':
for j in range(len(subnode)):
subsubnode = subnode[j]
- subsubSrc = subSrc[j]
+ subsubSrc = subSrc[j] # noqa: N806
subtag = (
subsubnode.tag.rsplit('}')[1]
if subsubnode.tag.startswith('{')
@@ -1751,22 +1751,22 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
):
subsubSrc.id = id
sources_id.append(id)
- id += 1
+ id += 1 # noqa: A001
sources.append(subsubSrc)
- sourceMesh = subsubSrc.polygon.discretize(rupture_mesh_spacing)
- sourceSurface = BaseSurface(sourceMesh)
- siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat)
+ sourceMesh = subsubSrc.polygon.discretize(rupture_mesh_spacing) # noqa: N806
+ sourceSurface = BaseSurface(sourceMesh) # noqa: N806
+ siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat) # noqa: N806
sources_dist.append(sourceSurface.get_min_distance(siteMesh))
elif (
tag.endswith('Source') and srcfilter.get_close_sites(subSrc) is not None
):
subSrc.id = id
sources_id.append(id)
- id += 1
+ id += 1 # noqa: A001
sources.append(subSrc)
- sourceMesh = subSrc.polygon.discretize(rupture_mesh_spacing)
- sourceSurface = BaseSurface(sourceMesh)
- siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat)
+ sourceMesh = subSrc.polygon.discretize(rupture_mesh_spacing) # noqa: N806
+ sourceSurface = BaseSurface(sourceMesh) # noqa: N806
+ siteMesh = Mesh(siteMeanCol.lon, siteMeanCol.lat) # noqa: N806
sources_dist.append(sourceSurface.get_min_distance(siteMesh))
sources_df = pd.DataFrame.from_dict(
{'source': sources, 'sourceDist': sources_dist, 'sourceID': sources_id}
@@ -1774,8 +1774,8 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
sources_df = sources_df.sort_values(['sourceDist'], ascending=(True))
sources_df = sources_df.set_index('sourceID')
allrups = []
- allrups_rRup = []
- allrups_srcId = []
+ allrups_rRup = [] # noqa: N806
+ allrups_srcId = [] # noqa: N806
for src in sources_df['source']:
src_rups = list(src.iter_ruptures())
for i, rup in enumerate(src_rups):
@@ -1801,12 +1801,12 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
maf = rup.occurrence_rate
if maf <= 0.0:
continue
- ruptureSurface = rup.surface
+ ruptureSurface = rup.surface # noqa: N806, F841
# Properties
- cur_dict['properties'] = dict()
+ cur_dict['properties'] = dict() # noqa: C408
name = sources_df.loc[src_id, 'source'].name
cur_dict['properties'].update({'Name': name})
- Mag = float(rup.mag)
+ Mag = float(rup.mag) # noqa: N806
if (Mag < minMag) or (Mag > maxMag):
continue
cur_dict['properties'].update({'Magnitude': Mag})
@@ -1846,7 +1846,7 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
cur_dict['properties'].update(
{'DistanceX': get_distances(rup, siteMeanCol, 'rx')[0]}
)
- cur_dict['geometry'] = dict()
+ cur_dict['geometry'] = dict() # noqa: C408
# if (len(arrays)==1 and arrays[0].shape[1]==1 and arrays[0].shape[2]==1):
# # Point Source
# cur_dict['geometry'].update({'type': 'Point'})
@@ -1859,7 +1859,7 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
) # See the get_top_edge_depth method of the BaseSurface class
coordinates = []
for i in range(len(top_edge.lats)):
- coordinates.append([top_edge.lons[i], top_edge.lats[i]])
+ coordinates.append([top_edge.lons[i], top_edge.lats[i]]) # noqa: PERF401
cur_dict['geometry'].update({'type': 'LineString'})
cur_dict['geometry'].update({'coordinates': coordinates})
else:
@@ -1878,28 +1878,28 @@ def export_rupture_to_json(scenario_info, mlon, mlat, siteFile, work_dir):
feature_collection_sorted = [feature_collection[i] for i in sort_ids]
del feature_collection
erf_data.update({'features': feature_collection_sorted})
- print(
+ print( # noqa: T201
f'FetchOpenQuake: total {len(feature_collection_sorted)} ruptures are collected.'
)
# Output
if outfile is not None:
- print(
+ print( # noqa: T201
f'The collected ruptures are sorted by MeanAnnualRate and saved in {outfile}'
)
- with open(outfile, 'w') as f:
+ with open(outfile, 'w') as f: # noqa: PTH123
json.dump(erf_data, f, indent=2)
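+ # NOTE (editorial sketch): each entry of 'features' is a GeoJSON Feature
+ # shaped roughly as below; only keys visible in this function are listed,
+ # since parts of the properties dict are elided by the hunks above:
+ #     {'properties': {'Name': ..., 'Magnitude': ..., 'DistanceX': ...},
+ #      'geometry': {'type': 'LineString', 'coordinates': [[lon, lat], ...]}}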
-def get_site_rup_info_oq(source_info, siteList):
+def get_site_rup_info_oq(source_info, siteList): # noqa: N803, D103
from openquake.hazardlib import site
from openquake.hazardlib.calc.filters import get_distances
rup = source_info['rup']
- distToRupture = []
- distJB = []
- distX = []
+ distToRupture = [] # noqa: N806, F841
+ distJB = [] # noqa: N806, F841
+ distX = [] # noqa: N806, F841
for i in range(len(siteList)):
- siteMeanCol = site.SiteCollection.from_points(
+ siteMeanCol = site.SiteCollection.from_points( # noqa: N806
[siteList[i]['lon']], [siteList[i]['lat']]
)
siteList[i].update({'rRup': get_distances(rup, siteMeanCol, 'rrup')[0]})
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py
index d60fc8ad9..c1454f127 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/FetchOpenSHA.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -40,20 +40,20 @@
import numpy as np
import pandas as pd
import ujson
-from java.io import *
-from java.lang import *
-from java.lang.reflect import *
-from java.util import *
-from org.opensha.commons.data import *
-from org.opensha.commons.data.function import *
-from org.opensha.commons.data.siteData import *
-from org.opensha.commons.geo import *
-from org.opensha.commons.param import *
-from org.opensha.commons.param.constraint import *
-from org.opensha.commons.param.event import *
-from org.opensha.sha.calc import *
-from org.opensha.sha.earthquake import *
-from org.opensha.sha.earthquake.param import *
+from java.io import * # noqa: F403
+from java.lang import * # noqa: F403
+from java.lang.reflect import * # noqa: F403
+from java.util import * # noqa: F403
+from org.opensha.commons.data import * # noqa: F403
+from org.opensha.commons.data.function import * # noqa: F403
+from org.opensha.commons.data.siteData import * # noqa: F403
+from org.opensha.commons.geo import * # noqa: F403
+from org.opensha.commons.param import * # noqa: F403
+from org.opensha.commons.param.constraint import * # noqa: F403
+from org.opensha.commons.param.event import * # noqa: F403
+from org.opensha.sha.calc import * # noqa: F403
+from org.opensha.sha.earthquake import * # noqa: F403
+from org.opensha.sha.earthquake.param import * # noqa: F403
from org.opensha.sha.earthquake.rupForecastImpl.Frankel02 import (
Frankel02_AdjustableEqkRupForecast,
)
@@ -64,29 +64,29 @@
from org.opensha.sha.earthquake.rupForecastImpl.WGCEP_UCERF_2_Final.MeanUCERF2 import (
MeanUCERF2,
)
-from org.opensha.sha.faultSurface import *
+from org.opensha.sha.faultSurface import * # noqa: F403
from org.opensha.sha.faultSurface.utils import PtSrcDistCorr
-from org.opensha.sha.imr import *
-from org.opensha.sha.imr.attenRelImpl import *
-from org.opensha.sha.imr.attenRelImpl.ngaw2 import *
-from org.opensha.sha.imr.attenRelImpl.ngaw2.NGAW2_Wrappers import *
-from org.opensha.sha.imr.param.IntensityMeasureParams import *
-from org.opensha.sha.imr.param.OtherParams import *
-from org.opensha.sha.util import *
+from org.opensha.sha.imr import * # noqa: F403
+from org.opensha.sha.imr.attenRelImpl import * # noqa: F403
+from org.opensha.sha.imr.attenRelImpl.ngaw2 import * # noqa: F403
+from org.opensha.sha.imr.attenRelImpl.ngaw2.NGAW2_Wrappers import * # noqa: F403
+from org.opensha.sha.imr.param.IntensityMeasureParams import * # noqa: F403
+from org.opensha.sha.imr.param.OtherParams import * # noqa: F403
+from org.opensha.sha.util import * # noqa: F403
from tqdm import tqdm
try:
from scratch.UCERF3.erf.mean import MeanUCERF3
except ModuleNotFoundError:
- MeanUCERF3 = jpype.JClass('scratch.UCERF3.erf.mean.MeanUCERF3')
+ MeanUCERF3 = jpype.JClass('scratch.UCERF3.erf.mean.MeanUCERF3') # noqa: F405
-from org.opensha.sha.gcim.calc import *
-from org.opensha.sha.gcim.imr.attenRelImpl import *
-from org.opensha.sha.gcim.imr.param.EqkRuptureParams import *
-from org.opensha.sha.gcim.imr.param.IntensityMeasureParams import *
+from org.opensha.sha.gcim.calc import * # noqa: F403
+from org.opensha.sha.gcim.imr.attenRelImpl import * # noqa: F403
+from org.opensha.sha.gcim.imr.param.EqkRuptureParams import * # noqa: F403
+from org.opensha.sha.gcim.imr.param.IntensityMeasureParams import * # noqa: F403
-def getERF(scenario_info, update_flag=True):
+def getERF(scenario_info, update_flag=True): # noqa: FBT002, C901, N802, D103
# Initialization
erf = None
erf_name = scenario_info['EqRupture']['Model']
@@ -95,15 +95,15 @@ def getERF(scenario_info, update_flag=True):
if erf_name == 'WGCEP (2007) UCERF2 - Single Branch':
erf = MeanUCERF2()
if (erf_selection.get('Background Seismicity', None) == 'Exclude') and (
- 'Treat Background Seismicity As' in erf_selection.keys()
+ 'Treat Background Seismicity As' in erf_selection.keys() # noqa: SIM118
):
value = erf_selection.pop('Treat Background Seismicity As')
- print(
+ print( # noqa: T201
f'Background Seismicity is set to Excluded; Treat Background Seismicity As: {value} is ignored'
)
for key, value in erf_selection.items():
if type(value) is int:
- value = float(value)
+ value = float(value) # noqa: PLW2901
erf.setParameter(key, value)
# erf.getParameter(key).setValue(value)
elif erf_name == 'USGS/CGS 2002 Adj. Cal. ERF':
@@ -118,10 +118,10 @@ def getERF(scenario_info, update_flag=True):
):
tmp.setPreset(MeanUCERF3.Presets.BOTH_FM_BRANCH_AVG)
if (erf_selection.get('Background Seismicity', None) == 'Exclude') and (
- 'Treat Background Seismicity As' in erf_selection.keys()
+ 'Treat Background Seismicity As' in erf_selection.keys() # noqa: SIM118
):
value = erf_selection.pop('Treat Background Seismicity As')
- print(
+ print( # noqa: T201
f'Background Seismicity is set to Excluded; Treat Background Seismicity As: {value} is ignored'
)
# Some parameters in MeanUCERF3 have overloaded setValue() Need to set one by one
@@ -144,10 +144,10 @@ def getERF(scenario_info, update_flag=True):
elif erf_selection.get('preset', None) == 'FM3.1 Branch Averaged':
tmp.setPreset(MeanUCERF3.Presets.FM3_1_BRANCH_AVG)
if (erf_selection.get('Background Seismicity', None) == 'Exclude') and (
- 'Treat Background Seismicity As' in erf_selection.keys()
+ 'Treat Background Seismicity As' in erf_selection.keys() # noqa: SIM118
):
value = erf_selection.pop('Treat Background Seismicity As')
- print(
+ print( # noqa: T201
f'Background Seismicity is set to Excluded; Treat Background Seismicity As: {value} is ignored'
)
# Some parameters in MeanUCERF3 have overloaded setValue() Need to set one by one
@@ -172,10 +172,10 @@ def getERF(scenario_info, update_flag=True):
elif erf_selection.get('preset', None) == 'FM3.2 Branch Averaged':
tmp.setPreset(MeanUCERF3.Presets.FM3_2_BRANCH_AVG)
if (erf_selection.get('Background Seismicity', None) == 'Exclude') and (
- 'Treat Background Seismicity As' in erf_selection.keys()
+ 'Treat Background Seismicity As' in erf_selection.keys() # noqa: SIM118
):
value = erf_selection.pop('Treat Background Seismicity As')
- print(
+ print( # noqa: T201
f'Background Seismicity is set to Excluded; Treat Background Seismicity As: {value} is ignored'
)
# Some parameters in MeanUCERF3 have overloaded setValue() Need to set one by one
@@ -198,7 +198,7 @@ def getERF(scenario_info, update_flag=True):
# Set Probability Model Option
setERFProbabilityModelOptions(tmp, erf_selection)
else:
- print(
+ print( # noqa: T201
f"""The specified Mean UCERF3 preset {erf_selection.get("preset", None)} is not implemented"""
)
erf = tmp
@@ -206,7 +206,7 @@ def getERF(scenario_info, update_flag=True):
elif erf_name == 'WGCEP Eqk Rate Model 2 ERF':
erf = UCERF2()
else:
- print('Please check the ERF model name.')
+ print('Please check the ERF model name.') # noqa: T201
if erf_name and update_flag:
erf.updateForecast()
@@ -214,182 +214,182 @@ def getERF(scenario_info, update_flag=True):
return erf
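+ # NOTE (editorial sketch): a minimal call with only the key this function
+ # is shown reading ('Model'); erf_selection is populated from lines elided
+ # by the hunk above:
+ #     erf = getERF({'EqRupture': {'Model': 'WGCEP (2007) UCERF2 - Single Branch'}})
+ #     # updateForecast() has already run when update_flag is True (the default)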
-def setERFbackgroundOptions(erf, selection):
+def setERFbackgroundOptions(erf, selection): # noqa: N802, D103
option = selection.get('Background Seismicity', None)
if option == 'Include':
- erf.setParameter('Background Seismicity', IncludeBackgroundOption.INCLUDE)
+ erf.setParameter('Background Seismicity', IncludeBackgroundOption.INCLUDE) # noqa: F405
elif option == 'Exclude':
- erf.setParameter('Background Seismicity', IncludeBackgroundOption.EXCLUDE)
+ erf.setParameter('Background Seismicity', IncludeBackgroundOption.EXCLUDE) # noqa: F405
elif option == 'Only':
- erf.setParameter('Background Seismicity', IncludeBackgroundOption.ONLY)
+ erf.setParameter('Background Seismicity', IncludeBackgroundOption.ONLY) # noqa: F405
-def setERFtreatBackgroundOptions(erf, selection):
+def setERFtreatBackgroundOptions(erf, selection): # noqa: N802, D103
option = selection.get('Treat Background Seismicity As', None)
if option is None:
pass
elif option == 'Point Sources':
- erf.setParameter('Treat Background Seismicity As', BackgroundRupType.POINT)
+ erf.setParameter('Treat Background Seismicity As', BackgroundRupType.POINT) # noqa: F405
elif option == 'Single Random Strike Faults':
- erf.setParameter('Treat Background Seismicity As', BackgroundRupType.FINITE)
+ erf.setParameter('Treat Background Seismicity As', BackgroundRupType.FINITE) # noqa: F405
elif option == 'Two Perpendicular Faults':
erf.setParameter(
'Treat Background Seismicity As',
- BackgroundRupType.CROSSHAIR,
+ BackgroundRupType.CROSSHAIR, # noqa: F405
)
-def setERFProbabilityModelOptions(erf, selection):
+def setERFProbabilityModelOptions(erf, selection): # noqa: N802, D103
option = selection.get('Probability Model', None)
if option is None:
pass
elif option == 'Poisson':
- erf.setParameter('Probability Model', ProbabilityModelOptions.POISSON)
+ erf.setParameter('Probability Model', ProbabilityModelOptions.POISSON) # noqa: F405
elif option == 'UCERF3 BPT':
- erf.setParameter('Probability Model', ProbabilityModelOptions.U3_BPT)
+ erf.setParameter('Probability Model', ProbabilityModelOptions.U3_BPT) # noqa: F405
erf.setParameter(
'Historic Open Interval', selection.get('Historic Open Interval')
)
setERFMagDependentAperiodicityOptions(erf, selection)
setERFBPTAveragingTypeOptions(erf, selection)
elif option == 'UCERF3 Preferred Blend':
- erf.setParameter('Probability Model', ProbabilityModelOptions.U3_PREF_BLEND)
+ erf.setParameter('Probability Model', ProbabilityModelOptions.U3_PREF_BLEND) # noqa: F405
erf.setParameter(
'Historic Open Interval', selection.get('Historic Open Interval')
)
setERFBPTAveragingTypeOptions(erf, selection)
elif option == 'WG02 BPT':
- erf.setParameter('Probability Model', ProbabilityModelOptions.WG02_BPT)
+ erf.setParameter('Probability Model', ProbabilityModelOptions.WG02_BPT) # noqa: F405
erf.setParameter(
'Historic Open Interval', selection.get('Historic Open Interval')
)
setERFMagDependentAperiodicityOptions(erf, selection)
-def setERFMagDependentAperiodicityOptions(erf, selection):
+def setERFMagDependentAperiodicityOptions(erf, selection): # noqa: C901, N802, D103
option = selection.get('Aperiodicity', None)
if option is None:
pass
elif option == '0.4,0.3,0.2,0.1':
- erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.LOW_VALUES)
+ erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.LOW_VALUES) # noqa: F405
elif option == '0.5,0.4,0.3,0.2':
- erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.MID_VALUES)
+ erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.MID_VALUES) # noqa: F405
elif option == '0.6,0.5,0.4,0.3':
- erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.HIGH_VALUES)
+ erf.setParameter('Aperiodicity', MagDependentAperiodicityOptions.HIGH_VALUES) # noqa: F405
elif option == 'All 0.1':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT1_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT1_VALUES, # noqa: F405
)
elif option == 'All 0.2':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT2_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT2_VALUES, # noqa: F405
)
elif option == 'All 0.3':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT3_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT3_VALUES, # noqa: F405
)
elif option == 'All 0.4':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT4_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT4_VALUES, # noqa: F405
)
elif option == 'All 0.5':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT5_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT5_VALUES, # noqa: F405
)
elif option == 'All 0.6':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT6_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT6_VALUES, # noqa: F405
)
elif option == 'All 0.7':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT7_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT7_VALUES, # noqa: F405
)
elif option == 'All 0.8':
erf.setParameter(
'Aperiodicity',
- MagDependentAperiodicityOptions.ALL_PT8_VALUES,
+ MagDependentAperiodicityOptions.ALL_PT8_VALUES, # noqa: F405
)
-def setERFBPTAveragingTypeOptions(erf, selection):
+def setERFBPTAveragingTypeOptions(erf, selection): # noqa: N802, D103
option = selection.get('BPT Averaging Type', None)
if option is None:
pass
elif option == 'AveRI and AveTimeSince':
erf.setParameter(
'BPT Averaging Type',
- BPTAveragingTypeOptions.AVE_RI_AVE_TIME_SINCE,
+ BPTAveragingTypeOptions.AVE_RI_AVE_TIME_SINCE, # noqa: F405
)
elif option == 'AveRI and AveNormTimeSince':
erf.setParameter(
'BPT Averaging Type',
- BPTAveragingTypeOptions.AVE_RI_AVE_NORM_TIME_SINCE,
+ BPTAveragingTypeOptions.AVE_RI_AVE_NORM_TIME_SINCE, # noqa: F405
)
elif option == 'AveRate and AveNormTimeSince':
erf.setParameter(
'BPT Averaging Type',
- BPTAveragingTypeOptions.AVE_RATE_AVE_NORM_TIME_SINCE,
+ BPTAveragingTypeOptions.AVE_RATE_AVE_NORM_TIME_SINCE, # noqa: F405
)
-def get_source_rupture(erf, source_index, rupture_index):
- rupSource = erf.getSource(source_index)
+def get_source_rupture(erf, source_index, rupture_index): # noqa: D103
+ rupSource = erf.getSource(source_index) # noqa: N806
ruptures = rupSource.getRuptureList()
rupture = ruptures.get(rupture_index)
return rupSource, rupture
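+ # NOTE (editorial sketch): typical use of the helper above, assuming an
+ # ERF built by getERF(); getMag() and getProbability() are assumed from
+ # the OpenSHA ProbEqkRupture API:
+ #     src, rup = get_source_rupture(erf, 0, 0)
+ #     mag, prob = float(rup.getMag()), float(rup.getProbability())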
-def get_source_distance(erf, source_index, lat, lon):
- rupSource = erf.getSource(source_index)
- sourceSurface = rupSource.getSourceSurface()
+def get_source_distance(erf, source_index, lat, lon): # noqa: D103
+ rupSource = erf.getSource(source_index) # noqa: N806
+ sourceSurface = rupSource.getSourceSurface() # noqa: N806
# print(lon)
# print(lat)
- distToSource = []
+ distToSource = [] # noqa: N806
for i in range(len(lat)):
- distToSource.append(
- float(sourceSurface.getDistanceRup(Location(lat[i], lon[i])))
+ distToSource.append( # noqa: PERF401
+ float(sourceSurface.getDistanceRup(Location(lat[i], lon[i]))) # noqa: F405
)
return distToSource
-def get_rupture_distance(erf, source_index, rupture_index, lat, lon):
- rupSource = erf.getSource(source_index)
- rupSurface = rupSource.getRupture(rupture_index).getRuptureSurface()
- distToRupture = []
+def get_rupture_distance(erf, source_index, rupture_index, lat, lon): # noqa: D103
+ rupSource = erf.getSource(source_index) # noqa: N806
+ rupSurface = rupSource.getRupture(rupture_index).getRuptureSurface() # noqa: N806
+ distToRupture = [] # noqa: N806
for i in range(len(lat)):
- distToRupture.append(
- float(rupSurface.getDistanceRup(Location(lat[i], lon[i])))
+ distToRupture.append( # noqa: PERF401
+ float(rupSurface.getDistanceRup(Location(lat[i], lon[i]))) # noqa: F405
)
return distToRupture
-def get_rupture_info_CY2014(erf, source_index, rupture_index, siteList):
- rupSource = erf.getSource(source_index)
- rupList = rupSource.getRuptureList()
- rupSurface = rupList.get(rupture_index).getRuptureSurface()
+def get_rupture_info_CY2014(erf, source_index, rupture_index, siteList): # noqa: N802, N803, D103
+ rupSource = erf.getSource(source_index) # noqa: N806
+ rupList = rupSource.getRuptureList() # noqa: N806
+ rupSurface = rupList.get(rupture_index).getRuptureSurface() # noqa: N806
if rupList.get(rupture_index).getHypocenterLocation() is None:
# https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/nshmp2/imr/ngaw2/NSHMP14_WUS_CB.java#L242
dip = float(rupSurface.getAveDip())
width = float(rupSurface.getAveWidth())
- zTop = float(rupSurface.getAveRupTopDepth())
- zHyp = zTop + np.sin(dip / 180.0 * np.pi) * width / 2.0
+ zTop = float(rupSurface.getAveRupTopDepth()) # noqa: N806
+ zHyp = zTop + np.sin(dip / 180.0 * np.pi) * width / 2.0 # noqa: N806
else:
- zHyp = rupList.get(rupture_index).getHypocenterLocation().getDepth()
+ zHyp = rupList.get(rupture_index).getHypocenterLocation().getDepth() # noqa: N806
for i in range(len(siteList)):
siteList[i].update(
{
'rRup': float(
rupSurface.getDistanceRup(
- Location(siteList[i]['lat'], siteList[i]['lon'])
+ Location(siteList[i]['lat'], siteList[i]['lon']) # noqa: F405
)
)
}
@@ -398,7 +398,7 @@ def get_rupture_info_CY2014(erf, source_index, rupture_index, siteList):
{
'rJB': float(
rupSurface.getDistanceJB(
- Location(siteList[i]['lat'], siteList[i]['lon'])
+ Location(siteList[i]['lat'], siteList[i]['lon']) # noqa: F405
)
)
}
@@ -407,7 +407,7 @@ def get_rupture_info_CY2014(erf, source_index, rupture_index, siteList):
{
'rX': float(
rupSurface.getDistanceX(
- Location(siteList[i]['lat'], siteList[i]['lon'])
+ Location(siteList[i]['lat'], siteList[i]['lon']) # noqa: F405
)
)
}
@@ -422,7 +422,7 @@ def get_rupture_info_CY2014(erf, source_index, rupture_index, siteList):
return site_rup_info, siteList
-def horzDistanceFast(lat1, lon1, lat2, lon2):
+def horzDistanceFast(lat1, lon1, lat2, lon2): # noqa: N802, D103
lat1 = lat1 / 180 * np.pi
lon1 = lon1 / 180 * np.pi
lat2 = lat2 / 180 * np.pi
@@ -431,20 +431,20 @@ def horzDistanceFast(lat1, lon1, lat2, lon2):
dlat = np.abs(lat2 - lat1)
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
c = 2 * np.arcsin(np.sqrt(a))
- EARTH_RADIUS_MEAN = 6371.0072 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/commons/geo/GeoTools.java#L22
+ EARTH_RADIUS_MEAN = 6371.0072 # https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/commons/geo/GeoTools.java#L22 # noqa: N806
# return EARTH_RADIUS_MEAN * np.sqrt((dLat * dLat) + (dLon * dLon))
return EARTH_RADIUS_MEAN * c
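A minimal standalone sketch of the haversine computation in horzDistanceFast above, useful for checking values outside the workflow. The mean Earth radius matches the OpenSHA constant cited in the comment; the test coordinates below are illustrative.

import numpy as np

EARTH_RADIUS_MEAN = 6371.0072  # km, same constant as in horzDistanceFast

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two points given in degrees."""
    lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return EARTH_RADIUS_MEAN * 2 * np.arcsin(np.sqrt(a))

# San Francisco to Los Angeles: roughly 559 km
print(haversine_km(37.7749, -122.4194, 34.0522, -118.2437))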
-def getPtSrcDistCorr(horzDist, mag, type):
+def getPtSrcDistCorr(horzDist, mag, type): # noqa: A002, N802, N803, D103
# https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/faultSurface/utils/PtSrcDistCorr.java#L20
if type == 'FIELD':
- rupLen = np.power(10.0, -3.22 + 0.69 * mag)
+ rupLen = np.power(10.0, -3.22 + 0.69 * mag) # noqa: N806
return 0.7071 + (1.0 - 0.7071) / (
1 + np.power(rupLen / (horzDist * 0.87), 1.1)
)
- elif type == 'NSHMP08':
- print(
+ elif type == 'NSHMP08': # noqa: RET505
+ print( # noqa: T201
'The NSHMP08 rJB correction has not been implemented; corr=1.0 is used instead.'
)
# https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/faultSurface/utils/PtSrcDistCorr.java#L20
@@ -453,18 +453,18 @@ def getPtSrcDistCorr(horzDist, mag, type):
return 1.0
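A short sketch of how the 'FIELD' branch above behaves: the correction shrinks the horizontal distance toward the 0.7071 floor when the site is close relative to the rupture length implied by the magnitude, and tends to 1.0 far from the source. The distances and magnitude below are illustrative.

import numpy as np

def field_corr(horz_dist_km, mag):
    # same magnitude-length scaling and correction as getPtSrcDistCorr('FIELD')
    rup_len = np.power(10.0, -3.22 + 0.69 * mag)
    return 0.7071 + (1.0 - 0.7071) / (1 + np.power(rup_len / (horz_dist_km * 0.87), 1.1))

for d in (1.0, 10.0, 100.0):
    print(d, field_corr(d, mag=7.0))  # ~0.71 near the source, ~0.91 at 100 km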
-def get_PointSource_info_CY2014(source_info, siteList):
+def get_PointSource_info_CY2014(source_info, siteList): # noqa: N802, N803, D103
# https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/faultSurface/PointSurface.java#L118
- sourceLat = source_info['Location']['Latitude']
- sourceLon = source_info['Location']['Longitude']
- sourceDepth = source_info['Location']['Depth']
+ sourceLat = source_info['Location']['Latitude'] # noqa: N806
+ sourceLon = source_info['Location']['Longitude'] # noqa: N806
+ sourceDepth = source_info['Location']['Depth'] # noqa: N806
for i in range(len(siteList)):
- siteLat = siteList[i]['lat']
- siteLon = siteList[i]['lon']
- horiD = horzDistanceFast(sourceLat, sourceLon, siteLat, siteLon)
- rJB = horiD * getPtSrcDistCorr(horiD, source_info['Magnitude'], 'NONE')
- rRup = np.sqrt(rJB**2 + sourceDepth**2)
- rX = 0.0
+ siteLat = siteList[i]['lat'] # noqa: N806
+ siteLon = siteList[i]['lon'] # noqa: N806
+ horiD = horzDistanceFast(sourceLat, sourceLon, siteLat, siteLon) # noqa: N806
+ rJB = horiD * getPtSrcDistCorr(horiD, source_info['Magnitude'], 'NONE') # noqa: N806
+ rRup = np.sqrt(rJB**2 + sourceDepth**2) # noqa: N806
+ rX = 0.0 # noqa: N806
siteList[i].update({'rRup': rRup})
siteList[i].update({'rJB': rJB})
siteList[i].update({'rX': rX})
@@ -477,31 +477,31 @@ def get_PointSource_info_CY2014(source_info, siteList):
return site_rup_info, siteList
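A worked example of the distance metrics assembled above for a point source, with made-up numbers: rJB is the corrected horizontal distance (corr = 1.0 for type 'NONE'), rRup folds in the source depth, and rX is simply set to zero in the code above.

import numpy as np

horiz_km, depth_km = 20.0, 10.0       # illustrative site offset and source depth
r_jb = horiz_km * 1.0                 # getPtSrcDistCorr(..., 'NONE') returns 1.0
r_rup = np.sqrt(r_jb**2 + depth_km**2)
r_x = 0.0
print(r_jb, r_rup, r_x)               # 20.0 22.36... 0.0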
-def export_to_json(
+def export_to_json( # noqa: C901, D103
erf,
site_loc,
outfile=None,
- EqName=None,
- minMag=0.0,
- maxMag=10.0,
- maxDistance=1000.0,
+ EqName=None, # noqa: N803
+ minMag=0.0, # noqa: N803
+ maxMag=10.0, # noqa: N803
+ maxDistance=1000.0, # noqa: N803
):
# Initializing
erf_data = {'type': 'FeatureCollection'}
- site_loc = Location(site_loc[0], site_loc[1])
- site = Site(site_loc)
+ site_loc = Location(site_loc[0], site_loc[1]) # noqa: F405
+ site = Site(site_loc) # noqa: F405
# Total source number
num_sources = erf.getNumSources()
source_tag = []
source_dist = []
for i in range(num_sources):
- rupSource = erf.getSource(i)
- distanceToSource = rupSource.getMinDistance(site)
+ rupSource = erf.getSource(i) # noqa: N806
+ distanceToSource = rupSource.getMinDistance(site) # noqa: N806
# sourceSurface = rupSource.getSourceSurface()
# distanceToSource = sourceSurface.getDistanceRup(site_loc)
source_tag.append(i)
source_dist.append(distanceToSource)
- df = pd.DataFrame.from_dict({'sourceID': source_tag, 'sourceDist': source_dist})
+ df = pd.DataFrame.from_dict({'sourceID': source_tag, 'sourceDist': source_dist}) # noqa: PD901
# Sorting sources
source_collection = df.sort_values(['sourceDist'], ascending=(True))
source_collection = source_collection[
@@ -511,26 +511,26 @@ def export_to_json(
feature_collection = []
for i in tqdm(range(source_collection.shape[0]), desc='Sources'):
source_index = source_collection.iloc[i, 0]
- distanceToSource = source_collection.iloc[i, 1]
+ distanceToSource = source_collection.iloc[i, 1] # noqa: N806
# Getting rupture distances
- rupSource = erf.getSource(source_index)
+ rupSource = erf.getSource(source_index) # noqa: N806
try:
- rupList = rupSource.getRuptureList()
- except:
- numOfRup = rupSource.getNumRuptures()
- rupList = []
+ rupList = rupSource.getRuptureList() # noqa: N806
+ except: # noqa: E722
+ numOfRup = rupSource.getNumRuptures() # noqa: N806
+ rupList = [] # noqa: N806
for n in range(numOfRup):
rupList.append(rupSource.getRupture(n))
- rupList = ArrayList(rupList)
+ rupList = ArrayList(rupList) # noqa: N806, F405
rup_tag = []
rup_dist = []
for j in range(rupList.size()):
- ruptureSurface = rupList.get(j).getRuptureSurface()
+ ruptureSurface = rupList.get(j).getRuptureSurface() # noqa: N806
# If a point-source surface, apply the rupture distance correction
- if isinstance(ruptureSurface, PointSurface):
+ if isinstance(ruptureSurface, PointSurface): # noqa: F405
# or 'FIELD' or 'NSHMP08'
- distCorrType = PtSrcDistCorr.Type.NONE
- (PointSurface @ ruptureSurface).setDistCorrMagAndType(
+ distCorrType = PtSrcDistCorr.Type.NONE # noqa: N806
+ (PointSurface @ ruptureSurface).setDistCorrMagAndType( # noqa: F405
rupList.get(j).getMag(), distCorrType
)
cur_dist = ruptureSurface.getDistanceRup(site_loc)
@@ -540,12 +540,12 @@ def export_to_json(
else:
# exceeding the maxDistance requirement
rup_dist.append(-1.0)
- df = pd.DataFrame.from_dict({'rupID': rup_tag, 'rupDist': rup_dist})
+ df = pd.DataFrame.from_dict({'rupID': rup_tag, 'rupDist': rup_dist}) # noqa: PD901
# Sorting
rup_collection = df.sort_values(['rupDist'], ascending=(True))
# Preparing the dict of ruptures
for j in range(rupList.size()):
- cur_dict = dict()
+ cur_dict = dict() # noqa: C408
cur_dict.update({'type': 'Feature'})
rup_index = rup_collection.iloc[j, 0]
cur_dist = rup_collection.iloc[j, 1]
@@ -556,15 +556,15 @@ def export_to_json(
maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration())
if maf <= 0.0:
continue
- ruptureSurface = rupture.getRuptureSurface()
+ ruptureSurface = rupture.getRuptureSurface() # noqa: N806
# Properties
- cur_dict['properties'] = dict()
+ cur_dict['properties'] = dict() # noqa: C408
name = str(rupSource.getName())
if EqName is not None:
if EqName not in name:
continue
cur_dict['properties'].update({'Name': name})
- Mag = float(rupture.getMag())
+ Mag = float(rupture.getMag()) # noqa: N806
if (Mag < minMag) or (Mag > maxMag):
continue
cur_dict['properties'].update({'Magnitude': Mag})
@@ -574,23 +574,23 @@ def export_to_json(
# these calls are time-consuming, so only run them if one needs
# detailed outputs of the sources
cur_dict['properties'].update({'Distance': float(cur_dist)})
- distanceRup = rupture.getRuptureSurface().getDistanceRup(site_loc)
+ distanceRup = rupture.getRuptureSurface().getDistanceRup(site_loc) # noqa: N806
cur_dict['properties'].update({'DistanceRup': float(distanceRup)})
- distanceSeis = rupture.getRuptureSurface().getDistanceSeis(site_loc)
+ distanceSeis = rupture.getRuptureSurface().getDistanceSeis(site_loc) # noqa: N806
cur_dict['properties'].update({'DistanceSeis': float(distanceSeis)})
- distanceJB = rupture.getRuptureSurface().getDistanceJB(site_loc)
+ distanceJB = rupture.getRuptureSurface().getDistanceJB(site_loc) # noqa: N806
cur_dict['properties'].update({'DistanceJB': float(distanceJB)})
- distanceX = rupture.getRuptureSurface().getDistanceX(site_loc)
+ distanceX = rupture.getRuptureSurface().getDistanceX(site_loc) # noqa: N806
cur_dict['properties'].update({'DistanceX': float(distanceX)})
- Prob = rupture.getProbability()
+ Prob = rupture.getProbability() # noqa: N806
cur_dict['properties'].update({'Probability': float(Prob)})
maf = rupture.getMeanAnnualRate(erf.getTimeSpan().getDuration())
cur_dict['properties'].update({'MeanAnnualRate': abs(float(maf))})
# Geometry
- cur_dict['geometry'] = dict()
+ cur_dict['geometry'] = dict() # noqa: C408
if ruptureSurface.isPointSurface():
# Point source
- pointSurface = ruptureSurface
+ pointSurface = ruptureSurface # noqa: N806
location = pointSurface.getLocation()
cur_dict['geometry'].update({'type': 'Point'})
cur_dict['geometry'].update(
@@ -605,11 +605,11 @@ def export_to_json(
# Line source
try:
trace = ruptureSurface.getUpperEdge()
- except:
+ except: # noqa: E722
trace = ruptureSurface.getEvenlyDiscritizedUpperEdge()
coordinates = []
for k in trace:
- coordinates.append(
+ coordinates.append( # noqa: PERF401
[float(k.getLongitude()), float(k.getLatitude())]
)
cur_dict['geometry'].update({'type': 'LineString'})
@@ -624,7 +624,7 @@ def export_to_json(
feature_collection_sorted = [feature_collection[i] for i in sort_ids]
del feature_collection
erf_data.update({'features': feature_collection_sorted})
- print(
+ print( # noqa: T201
f'FetchOpenSHA: {len(feature_collection_sorted)} ruptures collected in total.'
)
# num_preview = 1000
@@ -636,10 +636,10 @@ def export_to_json(
# import time
# startTime = time.process_time_ns()
if outfile is not None:
- print(
+ print( # noqa: T201
f'The collected ruptures are sorted by MeanAnnualRate and saved in {outfile}'
)
- with open(outfile, 'w') as f:
+ with open(outfile, 'w') as f: # noqa: PTH123
ujson.dump(erf_data, f, indent=2)
# print(f"Time consumed by json dump is {(time.process_time_ns()-startTime)/1e9}s")
@@ -648,27 +648,27 @@ def export_to_json(
return erf_data
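For reference, a minimal illustration of the FeatureCollection that export_to_json assembles. Only fields populated above are shown, and all values, including the source name, are made up.

example = {
    'type': 'FeatureCollection',
    'features': [
        {
            'type': 'Feature',
            'properties': {
                'Name': 'Example Fault Source',  # hypothetical source name
                'Magnitude': 7.2,
                'Distance': 35.1,
                'Probability': 0.004,
                'MeanAnnualRate': 0.004,
            },
            # Point geometry for point sources; LineString traces the upper
            # edge of finite ruptures
            'geometry': {
                'type': 'LineString',
                'coordinates': [[-122.0, 37.0], [-121.8, 37.2]],
            },
        },
    ],
}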
-def CreateIMRInstance(gmpe_name):
+def CreateIMRInstance(gmpe_name): # noqa: N802, D103
# GMPE name map
gmpe_map = {
- str(ASK_2014.NAME): ASK_2014_Wrapper.class_.getName(),
- str(BSSA_2014.NAME): BSSA_2014_Wrapper.class_.getName(),
- str(CB_2014.NAME): CB_2014_Wrapper.class_.getName(),
- str(CY_2014.NAME): CY_2014_Wrapper.class_.getName(),
- str(KS_2006_AttenRel.NAME): KS_2006_AttenRel.class_.getName(),
+ str(ASK_2014.NAME): ASK_2014_Wrapper.class_.getName(), # noqa: F405
+ str(BSSA_2014.NAME): BSSA_2014_Wrapper.class_.getName(), # noqa: F405
+ str(CB_2014.NAME): CB_2014_Wrapper.class_.getName(), # noqa: F405
+ str(CY_2014.NAME): CY_2014_Wrapper.class_.getName(), # noqa: F405
+ str(KS_2006_AttenRel.NAME): KS_2006_AttenRel.class_.getName(), # noqa: F405
str(
- BommerEtAl_2009_AttenRel.NAME
- ): BommerEtAl_2009_AttenRel.class_.getName(),
+ BommerEtAl_2009_AttenRel.NAME # noqa: F405
+ ): BommerEtAl_2009_AttenRel.class_.getName(), # noqa: F405
str(
- AfshariStewart_2016_AttenRel.NAME
- ): AfshariStewart_2016_AttenRel.class_.getName(),
+ AfshariStewart_2016_AttenRel.NAME # noqa: F405
+ ): AfshariStewart_2016_AttenRel.class_.getName(), # noqa: F405
}
# Mapping GMPE name
- imrClassName = gmpe_map.get(gmpe_name)
+ imrClassName = gmpe_map.get(gmpe_name) # noqa: N806
if imrClassName is None:
return imrClassName
# Getting the java class
- imrClass = Class.forName(imrClassName)
+ imrClass = Class.forName(imrClassName) # noqa: N806, F405
ctor = imrClass.getConstructor()
imr = ctor.newInstance()
# Setting default parameters
@@ -677,84 +677,84 @@ def CreateIMRInstance(gmpe_name):
return imr
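CreateIMRInstance resolves a GMPE display name to a Java class name and instantiates it through reflection (Class.forName, getConstructor, newInstance). A pure-Python analogue of that pattern is sketched below, using importlib so it runs without a JVM; the dotted path is just an example.

import importlib

def create_instance_by_name(dotted_name):
    """Python analogue of Class.forName(name).getConstructor().newInstance()."""
    module_name, _, class_name = dotted_name.rpartition('.')
    cls = getattr(importlib.import_module(module_name), class_name)
    return cls()  # zero-argument constructor

obj = create_instance_by_name('collections.OrderedDict')
print(type(obj))  # <class 'collections.OrderedDict'>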
-def get_DataSource(paramName, siteData):
- typeMap = SiteTranslator.DATA_TYPE_PARAM_NAME_MAP
- for dataType in typeMap.getTypesForParameterName(paramName):
- if dataType == SiteData.TYPE_VS30:
- for dataValue in siteData:
+def get_DataSource(paramName, siteData): # noqa: N802, N803, D103
+ typeMap = SiteTranslator.DATA_TYPE_PARAM_NAME_MAP # noqa: N806, F405
+ for dataType in typeMap.getTypesForParameterName(paramName): # noqa: N806
+ if dataType == SiteData.TYPE_VS30: # noqa: F405
+ for dataValue in siteData: # noqa: N806
if dataValue.getDataType() != dataType:
continue
- vs30 = Double(dataValue.getValue())
+ vs30 = Double(dataValue.getValue()) # noqa: F405
if (not vs30.isNaN()) and (vs30 > 0.0):
return dataValue.getSourceName()
- elif (dataType == SiteData.TYPE_DEPTH_TO_1_0) or (
- dataType == SiteData.TYPE_DEPTH_TO_2_5
+ elif (dataType == SiteData.TYPE_DEPTH_TO_1_0) or ( # noqa: F405, PLR1714
+ dataType == SiteData.TYPE_DEPTH_TO_2_5 # noqa: F405
):
- for dataValue in siteData:
+ for dataValue in siteData: # noqa: N806
if dataValue.getDataType() != dataType:
continue
- depth = Double(dataValue.getValue())
+ depth = Double(dataValue.getValue()) # noqa: F405
if (not depth.isNaN()) and (depth > 0.0):
return dataValue.getSourceName()
return 1
-def get_site_prop(gmpe_name, siteSpec):
+def get_site_prop(gmpe_name, siteSpec): # noqa: C901, N803, D103
# GMPE
try:
imr = CreateIMRInstance(gmpe_name)
- except:
- print('Please check GMPE name.')
+ except: # noqa: E722
+ print('Please check GMPE name.') # noqa: T201
return 1
# Site data
- sites = ArrayList()
+ sites = ArrayList() # noqa: F405
for cur_site in siteSpec:
- cur_loc = Location(
+ cur_loc = Location( # noqa: F405
cur_site['Location']['Latitude'], cur_site['Location']['Longitude']
)
- sites.add(Site(cur_loc))
- siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults()
+ sites.add(Site(cur_loc)) # noqa: F405
+ siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults() # noqa: N806, F405
try:
- availableSiteData = siteDataProviders.getAllAvailableData(sites)
- except:
- availableSiteData = []
- print(
+ availableSiteData = siteDataProviders.getAllAvailableData(sites) # noqa: N806
+ except: # noqa: E722
+ availableSiteData = [] # noqa: N806
+ print( # noqa: T201
'Remote getAllAvailableData is temporarily unavailable; using site Vs30 from the site CSV file.'
)
# return 1
- siteTrans = SiteTranslator()
+ siteTrans = SiteTranslator() # noqa: N806, F405
# Looping over all sites
site_prop = []
for i in range(len(siteSpec)):
- site_tmp = dict()
+ site_tmp = dict() # noqa: C408
# Current site
site = sites.get(i)
# Location
cur_site = siteSpec[i]
- locResults = {
+ locResults = { # noqa: N806
'Latitude': cur_site['Location']['Latitude'],
'Longitude': cur_site['Location']['Longitude'],
}
- cur_loc = Location(
+ cur_loc = Location( # noqa: F405
cur_site['Location']['Latitude'], cur_site['Location']['Longitude']
)
- siteDataValues = ArrayList()
+ siteDataValues = ArrayList() # noqa: N806, F405
for j in range(len(availableSiteData)):
siteDataValues.add(availableSiteData.get(j).getValue(i))
- imrSiteParams = imr.getSiteParams()
- siteDataResults = []
+ imrSiteParams = imr.getSiteParams() # noqa: N806
+ siteDataResults = [] # noqa: N806
# Setting site parameters
for j in range(imrSiteParams.size()):
- siteParam = imrSiteParams.getByIndex(j)
- newParam = Parameter.clone(siteParam)
+ siteParam = imrSiteParams.getByIndex(j) # noqa: N806
+ newParam = Parameter.clone(siteParam) # noqa: N806, F405
if siteDataValues.size() > 0:
- siteDataFound = siteTrans.setParameterValue(newParam, siteDataValues)
+ siteDataFound = siteTrans.setParameterValue(newParam, siteDataValues) # noqa: N806
else:
- siteDataFound = False
+ siteDataFound = False # noqa: N806
if str(newParam.getName()) == 'Vs30' and bool(
cur_site.get('Vs30', None)
):
- newParam.setValue(Double(cur_site['Vs30']))
+ newParam.setValue(Double(cur_site['Vs30'])) # noqa: F405
siteDataResults.append(
{
'Type': 'Vs30',
@@ -811,11 +811,11 @@ def get_site_prop(gmpe_name, siteSpec):
return siteSpec, sites, site_prop
-def get_IM(
+def get_IM( # noqa: C901, N802, D103
gmpe_info,
erf,
sites,
- siteSpec,
+ siteSpec, # noqa: N803
site_prop,
source_info,
station_info,
@@ -826,19 +826,19 @@ def get_IM(
# Creating intensity measure relationship instance
try:
imr = CreateIMRInstance(gmpe_name)
- except:
- print('Please check GMPE name.')
+ except: # noqa: E722
+ print('Please check GMPE name.') # noqa: T201
return 1, station_info
# Getting supported intensity measure types
ims = imr.getSupportedIntensityMeasures()
- saParam = ims.getParameter(SA_Param.NAME)
- supportedPeriods = saParam.getPeriodParam().getPeriods()
- Arrays.sort(supportedPeriods)
+ saParam = ims.getParameter(SA_Param.NAME) # noqa: N806, F405
+ supportedPeriods = saParam.getPeriodParam().getPeriods() # noqa: N806
+ Arrays.sort(supportedPeriods) # noqa: F405
# Rupture
- eqRup = EqkRupture()
+ eqRup = EqkRupture() # noqa: N806, F405
if source_info['Type'] == 'PointSource':
eqRup.setMag(source_info['Magnitude'])
- eqRupLocation = Location(
+ eqRupLocation = Location( # noqa: N806, F405
source_info['Location']['Latitude'],
source_info['Location']['Longitude'],
source_info['Location']['Depth'],
@@ -846,137 +846,137 @@ def get_IM(
eqRup.setPointSurface(eqRupLocation, source_info['AverageDip'])
eqRup.setAveRake(source_info['AverageRake'])
magnitude = source_info['Magnitude']
- meanAnnualRate = None
+ meanAnnualRate = None # noqa: N806
elif source_info['Type'] == 'ERF':
- timeSpan = TimeSpan(TimeSpan.NONE, TimeSpan.YEARS)
- erfParams = source_info.get('Parameters', None)
+ timeSpan = TimeSpan(TimeSpan.NONE, TimeSpan.YEARS) # noqa: N806, F405
+ erfParams = source_info.get('Parameters', None) # noqa: N806
# Additional parameters (if any)
if erfParams is not None:
for k in erfParams:
erf.setParameter(k, erfParams[k])
# Time span
- timeSpan = erf.getTimeSpan()
+ timeSpan = erf.getTimeSpan() # noqa: N806
# Source
- eqSource = erf.getSource(source_info['SourceIndex'])
+ eqSource = erf.getSource(source_info['SourceIndex']) # noqa: N806
eqSource.getName()
# Rupture
- eqRup = eqSource.getRupture(source_info['RuptureIndex'])
+ eqRup = eqSource.getRupture(source_info['RuptureIndex']) # noqa: N806
# Properties
magnitude = eqRup.getMag()
- averageDip = eqRup.getRuptureSurface().getAveDip()
- averageRake = eqRup.getAveRake()
+ averageDip = eqRup.getRuptureSurface().getAveDip() # noqa: N806, F841
+ averageRake = eqRup.getAveRake() # noqa: N806, F841
# Probability
- probEqRup = eqRup
- probability = probEqRup.getProbability()
+ probEqRup = eqRup # noqa: N806
+ probability = probEqRup.getProbability() # noqa: F841
# MAF
- meanAnnualRate = probEqRup.getMeanAnnualRate(timeSpan.getDuration())
+ meanAnnualRate = probEqRup.getMeanAnnualRate(timeSpan.getDuration()) # noqa: N806
# Rupture surface
- surface = eqRup.getRuptureSurface()
+ surface = eqRup.getRuptureSurface() # noqa: F841
# Setting up imr
imr.setEqkRupture(eqRup)
- imrParams = gmpe_info['Parameters']
+ imrParams = gmpe_info['Parameters'] # noqa: N806
if bool(imrParams):
- for k in imrParams.keys():
+ for k in imrParams.keys(): # noqa: SIM118
imr.getParameter(k).setValue(imrParams[k])
# Station
if station_info['Type'] == 'SiteList':
- siteSpec = station_info['SiteList']
+ siteSpec = station_info['SiteList'] # noqa: N806
# Intensity measure
periods = im_info.get('Periods', None)
if periods is not None:
periods = supportedPeriods
- tag_SA = False
- tag_PGA = False
- tag_PGV = False
- tag_Ds575 = False
- tag_Ds595 = False
+ tag_SA = False # noqa: N806
+ tag_PGA = False # noqa: N806
+ tag_PGV = False # noqa: N806
+ tag_Ds575 = False # noqa: N806, F841
+ tag_Ds595 = False # noqa: N806, F841
if 'SA' in im_info['Type']:
- tag_SA = True
+ tag_SA = True # noqa: N806
if 'PGA' in im_info['Type']:
- tag_PGA = True
+ tag_PGA = True # noqa: N806
if 'PGV' in im_info['Type']:
- tag_PGV = True
+ tag_PGV = True # noqa: N806
# Looping over sites
gm_collector = []
for i in range(len(siteSpec)):
- gmResults = site_prop[i]
+ gmResults = site_prop[i] # noqa: N806
# Current site
site = sites.get(i)
# Location
- cur_site = siteSpec[i]
+ cur_site = siteSpec[i] # noqa: F841
# Set up the site in the imr
imr.setSite(site)
try:
- stdDevParam = imr.getParameter(StdDevTypeParam.NAME)
- hasIEStats = stdDevParam.isAllowed(
- StdDevTypeParam.STD_DEV_TYPE_INTER
- ) and stdDevParam.isAllowed(StdDevTypeParam.STD_DEV_TYPE_INTRA)
- except:
- stdDevParaam = None
- hasIEStats = False
- cur_T = im_info.get('Periods', None)
+ stdDevParam = imr.getParameter(StdDevTypeParam.NAME) # noqa: N806, F405
+ hasIEStats = stdDevParam.isAllowed( # noqa: N806
+ StdDevTypeParam.STD_DEV_TYPE_INTER # noqa: F405
+ ) and stdDevParam.isAllowed(StdDevTypeParam.STD_DEV_TYPE_INTRA) # noqa: F405
+ except: # noqa: E722
+ stdDevParam = None # noqa: N806
+ hasIEStats = False # noqa: N806
+ cur_T = im_info.get('Periods', None) # noqa: N806
if tag_SA:
- saResult = {'Mean': [], 'TotalStdDev': []}
+ saResult = {'Mean': [], 'TotalStdDev': []} # noqa: N806
if hasIEStats:
saResult.update({'InterEvStdDev': []})
saResult.update({'IntraEvStdDev': []})
imr.setIntensityMeasure('SA')
- imtParam = imr.getIntensityMeasure()
- for Tj in cur_T:
- imtParam.getIndependentParameter(PeriodParam.NAME).setValue(
+ imtParam = imr.getIntensityMeasure() # noqa: N806
+ for Tj in cur_T: # noqa: N806
+ imtParam.getIndependentParameter(PeriodParam.NAME).setValue( # noqa: F405
float(Tj)
)
mean = imr.getMean()
saResult['Mean'].append(float(mean))
if stdDevParam is not None:
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_TOTAL)
- stdDev = imr.getStdDev()
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_TOTAL) # noqa: F405
+ stdDev = imr.getStdDev() # noqa: N806
saResult['TotalStdDev'].append(float(stdDev))
if hasIEStats:
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER)
- interEvStdDev = imr.getStdDev()
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA)
- intraEvStdDev = imr.getStdDev()
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER) # noqa: F405
+ interEvStdDev = imr.getStdDev() # noqa: N806
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA) # noqa: F405
+ intraEvStdDev = imr.getStdDev() # noqa: N806
saResult['InterEvStdDev'].append(float(interEvStdDev))
saResult['IntraEvStdDev'].append(float(intraEvStdDev))
gmResults.update({'lnSA': saResult})
if tag_PGA:
# for PGA, current T = 0
- cur_T = [0.00]
- pgaResult = {'Mean': [], 'TotalStdDev': []}
+ cur_T = [0.00] # noqa: N806
+ pgaResult = {'Mean': [], 'TotalStdDev': []} # noqa: N806
if hasIEStats:
pgaResult.update({'InterEvStdDev': []})
pgaResult.update({'IntraEvStdDev': []})
imr.setIntensityMeasure('PGA')
mean = imr.getMean()
pgaResult['Mean'].append(float(mean))
- stdDev = imr.getStdDev()
+ stdDev = imr.getStdDev() # noqa: N806
pgaResult['TotalStdDev'].append(float(stdDev))
if hasIEStats:
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER)
- interEvStdDev = imr.getStdDev()
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA)
- intraEvStdDev = imr.getStdDev()
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER) # noqa: F405
+ interEvStdDev = imr.getStdDev() # noqa: N806
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA) # noqa: F405
+ intraEvStdDev = imr.getStdDev() # noqa: N806
pgaResult['InterEvStdDev'].append(float(interEvStdDev))
pgaResult['IntraEvStdDev'].append(float(intraEvStdDev))
gmResults.update({'lnPGA': pgaResult})
if tag_PGV:
# for PGV, current T = 0
- cur_T = [0.00]
- pgvResult = {'Mean': [], 'TotalStdDev': []}
+ cur_T = [0.00] # noqa: N806
+ pgvResult = {'Mean': [], 'TotalStdDev': []} # noqa: N806
if hasIEStats:
pgvResult.update({'InterEvStdDev': []})
pgvResult.update({'IntraEvStdDev': []})
imr.setIntensityMeasure('PGV')
mean = imr.getMean()
pgvResult['Mean'].append(float(mean))
- stdDev = imr.getStdDev()
+ stdDev = imr.getStdDev() # noqa: N806
pgvResult['TotalStdDev'].append(float(stdDev))
if hasIEStats:
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER)
- interEvStdDev = imr.getStdDev()
- stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA)
- intraEvStdDev = imr.getStdDev()
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTER) # noqa: F405
+ interEvStdDev = imr.getStdDev() # noqa: N806
+ stdDevParam.setValue(StdDevTypeParam.STD_DEV_TYPE_INTRA) # noqa: F405
+ intraEvStdDev = imr.getStdDev() # noqa: N806
pgvResult['InterEvStdDev'].append(float(interEvStdDev))
pgvResult['IntraEvStdDev'].append(float(intraEvStdDev))
gmResults.update({'lnPGV': pgvResult})
@@ -998,32 +998,32 @@ def get_IM(
return res, station_info
-def get_site_vs30_from_opensha(lat, lon, vs30model='CGS/Wills VS30 Map (2015)'):
+def get_site_vs30_from_opensha(lat, lon, vs30model='CGS/Wills VS30 Map (2015)'): # noqa: D103
# set up site java object
- sites = ArrayList()
+ sites = ArrayList() # noqa: F405
num_sites = len(lat)
for i in range(num_sites):
- sites.add(Site(Location(lat[i], lon[i])))
+ sites.add(Site(Location(lat[i], lon[i]))) # noqa: F405
# prepare site data java object
- siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults()
- siteData = siteDataProviders.getAllAvailableData(sites)
+ siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults() # noqa: N806, F405
+ siteData = siteDataProviders.getAllAvailableData(sites) # noqa: N806
# search name
vs30 = []
for i in range(int(siteData.size())):
- cur_siteData = siteData.get(i)
+ cur_siteData = siteData.get(i) # noqa: N806
if str(cur_siteData.getSourceName()) == vs30model:
vs30 = [
float(cur_siteData.getValue(x).getValue()) for x in range(num_sites)
]
break
- else:
+ else: # noqa: RET508
continue
# check for any NaN (the Wills map returns NaN for offshore sites)
# Using global vs30 as default patch - 'Global Vs30 from Topographic Slope (Wald & Allen 2008)'
- if any([np.isnan(x) for x in vs30]):
+ if any([np.isnan(x) for x in vs30]): # noqa: C419
non_list = np.where(np.isnan(vs30))[0].tolist()
for i in non_list:
vs30[i] = float(siteData.get(3).getValue(i).getValue())
@@ -1032,12 +1032,12 @@ def get_site_vs30_from_opensha(lat, lon, vs30model='CGS/Wills VS30 Map (2015)'):
return vs30
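The NaN patch at the end of get_site_vs30_from_opensha replaces offshore sites with values from the provider at index 3 of the default provider list (the global topographic-slope model, per the comment above). A self-contained sketch of the same patching step, with made-up numbers:

import numpy as np

vs30 = [380.0, float('nan'), 520.0]   # Wills map values; NaN marks an offshore site
fallback = [405.0, 425.0, 445.0]      # e.g. slope-based global Vs30 values
for i in np.where(np.isnan(vs30))[0].tolist():
    vs30[i] = fallback[i]
print(vs30)  # [380.0, 425.0, 520.0]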
-def get_site_z1pt0_from_opensha(lat, lon):
- sites = ArrayList()
- sites.add(Site(Location(lat, lon)))
+def get_site_z1pt0_from_opensha(lat, lon): # noqa: D103
+ sites = ArrayList() # noqa: F405
+ sites.add(Site(Location(lat, lon))) # noqa: F405
# prepare site data java object
- siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults()
- siteData = siteDataProviders.getAllAvailableData(sites)
+ siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults() # noqa: N806, F405
+ siteData = siteDataProviders.getAllAvailableData(sites) # noqa: N806
for data in siteData:
if data.getValue(0).getDataType() == 'Depth to Vs = 1.0 km/sec':
z1pt0 = float(data.getValue(0).getValue())
@@ -1046,12 +1046,12 @@ def get_site_z1pt0_from_opensha(lat, lon):
return z1pt0 * 1000.0
-def get_site_z2pt5_from_opensha(lat, lon):
- sites = ArrayList()
- sites.add(Site(Location(lat, lon)))
+def get_site_z2pt5_from_opensha(lat, lon): # noqa: D103
+ sites = ArrayList() # noqa: F405
+ sites.add(Site(Location(lat, lon))) # noqa: F405
# prepare site data java object
- siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults()
- siteData = siteDataProviders.getAllAvailableData(sites)
+ siteDataProviders = OrderedSiteDataProviderList.createSiteDataProviderDefaults() # noqa: N806, F405
+ siteData = siteDataProviders.getAllAvailableData(sites) # noqa: N806
for data in siteData:
if data.getValue(0).getDataType() == 'Depth to Vs = 2.5 km/sec':
z2pt5 = float(data.getValue(0).getValue())
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py
index fe6b8025d..72aa79283 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/GMSimulators.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -66,7 +66,7 @@
IM_CORR = {'INTER': IM_CORR_INTER, 'INTRA': IM_CORR_INTRA}
-def simulate_ground_motion(
+def simulate_ground_motion( # noqa: D103
stations,
im_raw_path,
im_list,
@@ -80,9 +80,9 @@ def simulate_ground_motion(
ln_im_mr = []
mag_maf = []
t_start = time.time()
- im_sampled = dict()
+ im_sampled = dict() # noqa: C408
if im_raw_path.endswith('.json'):
- with open(im_raw_path) as f:
+ with open(im_raw_path) as f: # noqa: PTH123
im_raw = ujson.load(f)
for i in eq_ids:
im_sampled.update({i: im_raw[str(i)]})
@@ -96,7 +96,7 @@ def simulate_ground_motion(
elif im_raw_path.endswith('.hdf5'):
with h5py.File(im_raw_path, 'r') as f:
for i in eq_ids:
- sample = dict()
+ sample = dict() # noqa: C408
sample.update({'Mean': f[str(i)]['Mean'][()]})
sample.update({'InterEvStdDev': f[str(i)]['InterEvStdDev'][()]})
sample.update({'IntraEvStdDev': f[str(i)]['IntraEvStdDev'][()]})
@@ -109,7 +109,7 @@ def simulate_ground_motion(
im_info=im_info,
)
else:
- SystemError(f'Unrecognized IM mean and stddev file format in {im_raw_path}')
+ SystemError(f'Unrecognized IM mean and stddev file format in {im_raw_path}') # noqa: PLW0133
im_raw = im_sampled
for scen_i in tqdm(
range(len(eq_ids)),
@@ -150,19 +150,19 @@ def simulate_ground_motion(
]
)
- print(
+ print( # noqa: T201
f'ComputeIntensityMeasure: all inter- and intra-event correlation {time.time() - t_start} sec'
)
# return
return ln_im_mr, mag_maf
-class GM_Simulator:
+class GM_Simulator: # noqa: D101
def __init__(
self,
- site_info=[],
- im_list=[],
- im_raw=dict(),
+ site_info=[], # noqa: B006
+ im_list=[], # noqa: B006
+ im_raw=dict(), # noqa: B006, C408
num_simu=0,
correlation_info=None,
im_info=None,
@@ -173,13 +173,13 @@ def __init__(
self.set_im_raw(im_raw, im_list)
self.cross_check_im_correlation()
- def set_sites(self, site_info):
+ def set_sites(self, site_info): # noqa: D102
# set sites
self.sites = site_info.copy()
self.num_sites = len(self.sites)
- if self.num_sites < 2:
+ if self.num_sites < 2: # noqa: PLR2004
self.stn_dist = None
- print(
+ print( # noqa: T201
'GM_Simulator: only one site is defined; spatial correlation models are ignored.'
)
return
@@ -187,8 +187,8 @@ def set_sites(self, site_info):
def _compute_distance_matrix(self):
# site number check
- if self.num_sites < 2:
- print('GM_Simulator: error - please give at least two sites.')
+ if self.num_sites < 2: # noqa: PLR2004
+ print('GM_Simulator: error - please give at least two sites.') # noqa: T201
self.stn_dist = None
return
# compute the distance matrix
@@ -201,11 +201,11 @@ def _compute_distance_matrix(self):
tmp[i, j] = CorrelationModel.get_distance_from_lat_lon(loc_i, loc_j)
self.stn_dist = tmp
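The double loop in _compute_distance_matrix is O(n^2) in pure Python; assuming the helper it calls computes a great-circle distance, the same symmetric matrix can be built with NumPy broadcasting. A sketch (degrees in, km out; the two test sites are illustrative):

import numpy as np

def distance_matrix_km(lats, lons, radius_km=6371.0072):
    lat = np.radians(np.asarray(lats, dtype=float))[:, None]
    lon = np.radians(np.asarray(lons, dtype=float))[:, None]
    a = (np.sin((lat - lat.T) / 2) ** 2
         + np.cos(lat) * np.cos(lat.T) * np.sin((lon - lon.T) / 2) ** 2)
    return radius_km * 2 * np.arcsin(np.sqrt(a))

print(distance_matrix_km([37.77, 34.05], [-122.42, -118.24]))  # ~559 km off-diagonal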
- def set_num_simu(self, num_simu):
+ def set_num_simu(self, num_simu): # noqa: D102
# set simulation number
self.num_simu = num_simu
- def set_im_raw(self, im_raw, im_list):
+ def set_im_raw(self, im_raw, im_list): # noqa: D102
# get IM type list
self.im_type_list = im_raw.get('IM', [])
# get im_data
@@ -217,7 +217,7 @@ def set_im_raw(self, im_raw, im_list):
# set IM size
self.num_im = len(self.im_name_list)
- def get_ln_im(self):
+ def get_ln_im(self): # noqa: D102
ln_im = []
for i in range(self.num_sites):
tmp_im_data = []
@@ -228,7 +228,7 @@ def get_ln_im(self):
ln_im.append(tmp_im_data)
return ln_im
- def get_inter_sigma_im(self):
+ def get_inter_sigma_im(self): # noqa: D102
inter_sigma_im = []
for i in range(self.num_sites):
tmp_im_data = []
@@ -240,7 +240,7 @@ def get_inter_sigma_im(self):
inter_sigma_im.append(tmp_im_data)
return inter_sigma_im
- def get_intra_sigma_im(self):
+ def get_intra_sigma_im(self): # noqa: D102
intra_sigma_im = []
for i in range(self.num_sites):
tmp_im_data = []
@@ -252,31 +252,31 @@ def get_intra_sigma_im(self):
intra_sigma_im.append(tmp_im_data)
return intra_sigma_im
- def parse_correlation_info(self, correlation_info, im_info):
+ def parse_correlation_info(self, correlation_info, im_info): # noqa: C901, D102
# default is no correlation model and uncorrelated motions if generated
self.inter_cm = None
self.intra_cm = None
# parse correlation information if any
if correlation_info is None:
- print(
+ print( # noqa: T201
'GM_Simulator: warning - no correlation information found; generated motions will be uncorrelated.'
)
return
if correlation_info.get('Type', None) == 'Vector':
- inter_cm = dict()
+ inter_cm = dict() # noqa: C408
im_info.pop('Type')
for im, item in im_info.items():
# for im in self.im_type_list:
inter_cm.update({im: item['InterEventCorr']})
- inter_cm_unique = list(set([item for _, item in inter_cm.items()]))
+ inter_cm_unique = list(set([item for _, item in inter_cm.items()])) # noqa: C403
if len(inter_cm_unique) == 1:
inter_cm = inter_cm_unique[0]
self.inter_cm = inter_cm
- intra_cm = dict()
+ intra_cm = dict() # noqa: C408
for im, item in im_info.items():
# for im in self.im_type_list:
intra_cm.update({im: item['IntraEventCorr']})
- intra_cm_unique = list(set([item for _, item in intra_cm.items()]))
+ intra_cm_unique = list(set([item for _, item in intra_cm.items()])) # noqa: C403
if len(intra_cm_unique) == 1:
intra_cm = intra_cm_unique[0]
self.intra_cm = intra_cm
@@ -289,7 +289,7 @@ def parse_correlation_info(self, correlation_info, im_info):
# back compatibility
self.inter_cm = correlation_info['SaInterEvent']
else:
- print(
+ print( # noqa: T201
'GM_Simulator: no inter-event correlation information found; generated motions will be uncorrelated.'
)
# intra-event model
@@ -299,20 +299,20 @@ def parse_correlation_info(self, correlation_info, im_info):
# back compatibility
self.intra_cm = correlation_info['SaIntraEvent']
else:
- print(
+ print( # noqa: T201
'GM_Simulator: no intra-event correlation information found; generated motions will be uncorrelated.'
)
- def cross_check_im_correlation(self):
+ def cross_check_im_correlation(self): # noqa: C901, D102
# because each correlation model only applies to certain intensity measures,
# here we check whether the correlation models are applicable to the required intensity measures
self.im_cm_inter_flag = True
self.im_cm_intra_flag = True
- if type(self.inter_cm) == dict:
+ if type(self.inter_cm) == dict: # noqa: E721
for cur_im in self.im_type_list:
avail_im_inter_cm = IM_CORR_INTER.get(self.inter_cm[cur_im])
if cur_im not in avail_im_inter_cm:
- print(
+ print( # noqa: T201
f'GM_Simulator.cross_check_im_correlation: warning - {cur_im} is not available in {self.inter_cm}'
)
self.im_cm_inter_flag = False
@@ -322,16 +322,16 @@ def cross_check_im_correlation(self):
if avail_im_inter_cm is not None:
for cur_im in self.im_type_list:
if cur_im not in avail_im_inter_cm:
- print(
+ print( # noqa: T201
f'GM_Simulator.cross_check_im_correlation: warning - {cur_im} is not available in {self.inter_cm}'
)
self.im_cm_inter_flag = False
continue
- if type(self.intra_cm) == dict:
+ if type(self.intra_cm) == dict: # noqa: E721
for cur_im in self.im_type_list:
avail_im_intra_cm = IM_CORR_INTRA.get(self.intra_cm[cur_im])
if cur_im not in avail_im_intra_cm:
- print(
+ print( # noqa: T201
f'GM_Simulator.cross_check_im_correlation: warning - {cur_im} is not available in {self.intra_cm}'
)
self.im_cm_intra_flag = False
@@ -341,13 +341,13 @@ def cross_check_im_correlation(self):
if avail_im_intra_cm is not None:
for cur_im in self.im_type_list:
if cur_im not in avail_im_intra_cm:
- print(
+ print( # noqa: T201
f'GM_Simulator.cross_check_im_correlation: warning - {cur_im} is not available in {self.intra_cm}'
)
self.im_cm_intra_flag = False
continue
- def compute_inter_event_residual_ij(self, cm, im_name_list_1, im_name_list_2):
+ def compute_inter_event_residual_ij(self, cm, im_name_list_1, im_name_list_2): # noqa: D102
if cm == 'Baker & Jayaram (2008)':
rho = np.array(
[
@@ -365,19 +365,19 @@ def compute_inter_event_residual_ij(self, cm, im_name_list_1, im_name_list_2):
]
).reshape([len(im_name_list_1), len(im_name_list_2)])
else:
- # TODO: extending this to more inter-event correlation models
+ # TODO: extend this to more inter-event correlation models # noqa: TD002
sys.exit(
'GM_Simulator.compute_inter_event_residual: currently supporting Baker & Jayaram (2008), Baker & Bradley (2017)'
)
return rho
- def replace_submatrix(self, mat, ind1, ind2, mat_replace):
+ def replace_submatrix(self, mat, ind1, ind2, mat_replace): # noqa: D102
for i, index in enumerate(ind1):
mat[index, ind2] = mat_replace[i, :]
return mat
- def compute_inter_event_residual(self):
- if type(self.inter_cm) == dict:
+ def compute_inter_event_residual(self): # noqa: D102
+ if type(self.inter_cm) == dict: # noqa: E721
rho = np.zeros([self.num_im, self.num_im])
im_types = list(self.inter_cm.keys())
for i in range(len(im_types)):
@@ -437,9 +437,9 @@ def compute_inter_event_residual(self):
np.zeros(self.num_im), rho, self.num_simu
).T
# return
- return residuals
+ return residuals # noqa: RET504
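The final sampling step above draws all inter-event residuals at once from a zero-mean multivariate normal whose covariance is the assembled correlation matrix (unit variances, so covariance equals correlation). A toy-sized sketch with a made-up rho:

import numpy as np

num_im, num_simu = 3, 5
rho = np.array([[1.0, 0.8, 0.5],
                [0.8, 1.0, 0.7],
                [0.5, 0.7, 1.0]])          # illustrative IM-to-IM correlations
residuals = np.random.multivariate_normal(np.zeros(num_im), rho, num_simu).T
print(residuals.shape)                     # (num_im, num_simu), as in the .T above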
- def compute_intra_event_residual_i(self, cm, im_name_list, num_simu):
+ def compute_intra_event_residual_i(self, cm, im_name_list, num_simu): # noqa: D102
if cm == 'Jayaram & Baker (2009)':
rho = np.zeros((self.num_sites, self.num_sites, len(im_name_list)))
for i in range(self.num_sites):
@@ -472,15 +472,15 @@ def compute_intra_event_residual_i(self, cm, im_name_list, num_simu):
self.sites, im_name_list, num_simu, num_pc
)
else:
- # TODO: extending this to more inter-event correlation models
+ # TODO: extend this to more intra-event correlation models # noqa: TD002
sys.exit(
'GM_Simulator.compute_intra_event_residual: currently supporting Jayaram & Baker (2009), Loth & Baker (2013), Markhvida et al. (2017), Du & Ning (2021)'
)
return residuals
- def compute_intra_event_residual(self):
- if type(self.intra_cm) == dict:
- cm_groups = dict()
+ def compute_intra_event_residual(self): # noqa: D102
+ if type(self.intra_cm) == dict: # noqa: E721
+ cm_groups = dict() # noqa: C408
# Group the IMs using the same cm
for key, item in self.intra_cm.items():
if item not in cm_groups:
@@ -513,11 +513,11 @@ def compute_intra_event_residual(self):
return residuals
-class GM_Simulator_hdf5(GM_Simulator):
+class GM_Simulator_hdf5(GM_Simulator): # noqa: D101
def __init__(
self,
- site_info=[],
- im_list=[],
+ site_info=[], # noqa: B006
+ im_list=[], # noqa: B006
num_simu=0,
correlation_info=None,
im_info=None,
@@ -528,7 +528,7 @@ def __init__(
self.parse_correlation_info(correlation_info, im_info)
self.cross_check_im_correlation()
- def set_im_type(self, im_list):
+ def set_im_type(self, im_list): # noqa: D102
self.im_name_list = im_list
im_types = set()
for im in im_list:
@@ -539,7 +539,7 @@ def set_im_type(self, im_list):
elif im.startswith('PGV'):
im_types.add('PGV')
else:
- SyntaxError(f'Unrecognized im type: {im}')
+ SyntaxError(f'Unrecognized im type: {im}') # noqa: PLW0133
# Add ims one by one because the order is important
self.im_type_list = []
if ('PGA') in im_types:
@@ -549,26 +549,26 @@ def set_im_type(self, im_list):
if ('PGV') in im_types:
self.im_type_list.append('PGV')
- def set_im_raw(self, im_raw, im_list):
+ def set_im_raw(self, im_raw, im_list): # noqa: D102
self.im_name_list = im_list
self.num_im = len(im_list)
self.im_data = im_raw
- def get_ln_im(self):
+ def get_ln_im(self): # noqa: D102
ln_im = []
for i in range(self.num_sites):
tmp_im_data = self.im_data['Mean'][i, :].tolist()
ln_im.append(tmp_im_data)
return ln_im
- def get_inter_sigma_im(self):
+ def get_inter_sigma_im(self): # noqa: D102
inter_sigma_im = []
for i in range(self.num_sites):
tmp_im_data = self.im_data['InterEvStdDev'][i, :].tolist()
inter_sigma_im.append(tmp_im_data)
return inter_sigma_im
- def get_intra_sigma_im(self):
+ def get_intra_sigma_im(self): # noqa: D102
intra_sigma_im = []
for i in range(self.num_sites):
tmp_im_data = self.im_data['IntraEvStdDev'][i, :].tolist()
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py
index 83e6cc19c..2ca2f8fec 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardOccurrence.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -52,18 +52,18 @@
from scipy.stats import norm
from sklearn.linear_model import lasso_path
from tqdm import tqdm
-from USGS_API import *
+from USGS_API import * # noqa: F403
-def configure_hazard_occurrence(
+def configure_hazard_occurrence( # noqa: C901, D103
input_dir,
output_dir,
- IMfile,
+ IMfile, # noqa: N803
im_list,
scenarios,
hzo_config=None,
site_config=None,
- mth_flag=True,
+ mth_flag=True, # noqa: FBT002
):
if hzo_config is None or site_config is None:
# no model is defined
@@ -87,7 +87,7 @@ def configure_hazard_occurrence(
# return periods
if hc_input is None:
return {}
- elif hc_input == 'Inferred_NSHMP':
+ elif hc_input == 'Inferred_NSHMP': # noqa: RET505
period = hzo_config.get('Period', 0.0)
if im_type == 'SA':
cur_imt = im_type + f'{period:.1f}'.replace('.', 'P')
@@ -102,7 +102,7 @@ def configure_hazard_occurrence(
cur_lat = cur_site.get('lat')
cur_vs30 = cur_site.get('vs30', 760)
hazard_curve_collector.append(
- USGS_HazardCurve(
+ USGS_HazardCurve( # noqa: F405
longitude=cur_lon,
latitude=cur_lat,
vs30=cur_vs30,
@@ -112,7 +112,7 @@ def configure_hazard_occurrence(
)
)
hc_data = []
- print(
+ print( # noqa: T201
'HazardOccurrence: fetching USGS hazard curves for individual sites - this may take a while.'
)
t_start = time.time()
@@ -138,7 +138,7 @@ def configure_hazard_occurrence(
th.join()
# order the res_dict by id
res_ordered = collections.OrderedDict(sorted(hc_dict.items()))
- for i, cur_res in res_ordered.items():
+ for i, cur_res in res_ordered.items(): # noqa: B007
hc_data.append(cur_res)
else:
for i in range(len(hazard_curve_collector)):
@@ -146,12 +146,12 @@ def configure_hazard_occurrence(
if cur_collector.fetch_url():
hc_data.append(cur_collector.get_hazard_curve())
else:
- print(
+ print( # noqa: T201
f'HazardOccurrence: error in fetching hazard curve for site {i}.'
)
return None
- print(
+ print( # noqa: T201
f'HazardOccurrence: all hazard curves fetched in {time.time() - t_start} sec.'
)
elif hc_input == 'Inferred_sourceFile':
@@ -161,8 +161,8 @@ def configure_hazard_occurrence(
else:
cur_imt = im_type
if IMfile.lower().endswith('.json'):
- with open(IMfile) as f:
- IMdata = json.load(f)
+ with open(IMfile) as f: # noqa: PTH123
+ IMdata = json.load(f) # noqa: N806
hc_data = calc_hazard_curves(IMdata, site_config, cur_imt)
elif IMfile.lower().endswith('.hdf5'):
hc_data = calc_hazard_curves_hdf5(
@@ -171,7 +171,7 @@ def configure_hazard_occurrence(
# c_vect = calc_hazard_contribution(IMdata, site_config,
# return_periods, hc_data, cur_imt)
else:
- hc_input = os.path.join(input_dir, hc_input)
+ hc_input = os.path.join(input_dir, hc_input) # noqa: PTH118
if hc_input.endswith('.csv'):
hc_data = get_hazard_curves(input_csv=hc_input)
elif hc_input.endswith('.json'):
@@ -203,7 +203,7 @@ def configure_hazard_occurrence(
'HazardCurves': hc_interp_list,
}
# output the hazard occurrence information file
- with open(os.path.join(output_dir, 'HazardCurves.json'), 'w') as f:
+ with open(os.path.join(output_dir, 'HazardCurves.json'), 'w') as f: # noqa: PTH118, PTH123
json.dump(occ_dict, f, indent=2)
occ_dict = {
'Model': model_type,
@@ -215,36 +215,36 @@ def configure_hazard_occurrence(
'HazardCurves': hc_interp,
}
# return
- return occ_dict
+ return occ_dict # noqa: RET504
-def fetch_usgs_hazard_curve_para(ids, hc_collectors, hc_dict):
+def fetch_usgs_hazard_curve_para(ids, hc_collectors, hc_dict): # noqa: D103
for cur_id, cur_collector in zip(ids, hc_collectors):
if cur_collector.fetch_url():
hc_dict[cur_id] = cur_collector.get_hazard_curve()
else:
- print(
+ print( # noqa: T201
f'HazardOccurrence: error in fetching hazard curve for site {cur_id}.'
)
# return
-def calc_hazard_curve_and_contri(IMdata, site_config, im, targetReturnPeriods):
+def calc_hazard_curve_and_contri(IMdata, site_config, im, targetReturnPeriods): # noqa: ARG001, N803, D103
if im[0:2] == 'SA':
period = float(im[2:].replace('P', '.'))
im_name = 'lnSA'
- periods = IMdata[list(IMdata.keys())[0]]['Periods']
+ periods = IMdata[list(IMdata.keys())[0]]['Periods'] # noqa: RUF015
im_ind = np.where(np.array(periods) == period)[0][0]
else:
- im_name = 'lnPGA'
- im_ind = 0
+ im_name = 'lnPGA' # noqa: F841
+ im_ind = 0 # noqa: F841
-def calc_hazard_contribution(IMdata, site_config, targetReturnPeriods, hc_data, im):
+def calc_hazard_contribution(IMdata, site_config, targetReturnPeriods, hc_data, im): # noqa: N803, D103
if im[0:2] == 'SA':
period = float(im[2:].replace('P', '.'))
im_name = 'lnSA'
- periods = IMdata[list(IMdata.keys())[0]]['Periods']
+ periods = IMdata[list(IMdata.keys())[0]]['Periods'] # noqa: RUF015
im_ind = np.where(np.array(periods) == period)[0][0]
else:
im_name = 'lnPGA'
@@ -256,12 +256,12 @@ def calc_hazard_contribution(IMdata, site_config, targetReturnPeriods, hc_data,
):
c_j = 0
scenario = IMdata[list(IMdata.keys())[j]]
- mar = scenario['MeanAnnualRate']
+ mar = scenario['MeanAnnualRate'] # noqa: F841
for r in range(len(targetReturnPeriods)):
for i in range(len(site_config)):
- lnIM = scenario['GroundMotions'][i][im_name]
- lnIM_mean = lnIM['Mean'][im_ind]
- lnIM_std = lnIM['TotalStdDev'][im_ind]
+ lnIM = scenario['GroundMotions'][i][im_name] # noqa: N806
+ lnIM_mean = lnIM['Mean'][im_ind] # noqa: N806
+ lnIM_std = lnIM['TotalStdDev'][im_ind] # noqa: N806
y_ir = np.interp(
targetReturnPeriods[r],
np.array(hc_data[i]['ReturnPeriod']),
@@ -270,32 +270,32 @@ def calc_hazard_contribution(IMdata, site_config, targetReturnPeriods, hc_data,
right=hc_data[i]['ReturnPeriod'][-1],
)
p_exceed = 1 - norm.cdf(np.log(y_ir), lnIM_mean, lnIM_std)
- normConstant = 0
+ normConstant = 0 # noqa: N806
for j2 in range(len(IMdata)):
pj = IMdata[list(IMdata.keys())[j2]]['MeanAnnualRate']
- lnIM2 = IMdata[list(IMdata.keys())[j2]]['GroundMotions'][i][
+ lnIM2 = IMdata[list(IMdata.keys())[j2]]['GroundMotions'][i][ # noqa: N806
im_name
]
- lnIM_mean2 = lnIM2['Mean'][im_ind]
- lnIM_std2 = lnIM2['TotalStdDev'][im_ind]
+ lnIM_mean2 = lnIM2['Mean'][im_ind] # noqa: N806
+ lnIM_std2 = lnIM2['TotalStdDev'][im_ind] # noqa: N806
p_exceed2 = 1 - norm.cdf(np.log(y_ir), lnIM_mean2, lnIM_std2)
- normConstant += p_exceed2
+ normConstant += p_exceed2 # noqa: N806
c_j += pj * p_exceed / normConstant
c_vect[j] = c_j
return c_vect
-def calc_hazard_curves(IMdata, site_config, im):
+def calc_hazard_curves(IMdata, site_config, im): # noqa: N803, D103
if im[0:2] == 'SA':
period = float(im[2:].replace('P', '.'))
im_name = 'lnSA'
- periods = IMdata[list(IMdata.keys())[0]]['Periods']
+ periods = IMdata[list(IMdata.keys())[0]]['Periods'] # noqa: RUF015
im_ind = np.where(np.array(periods) == period)[0][0]
else:
im_name = 'lnPGA'
im_ind = 0
- IMRange = np.power(10, np.linspace(-4, 2, 60))
- exceedRate = np.zeros((len(IMRange), len(site_config)))
+ IMRange = np.power(10, np.linspace(-4, 2, 60)) # noqa: N806
+ exceedRate = np.zeros((len(IMRange), len(site_config))) # noqa: N806
hc_data = [
{'siteID': 0, 'ReturnPeriod': list(exceedRate), 'IM': list(exceedRate)}
] * len(site_config)
@@ -307,13 +307,13 @@ def calc_hazard_curves(IMdata, site_config, im):
scenario = IMdata[scenario_idx[scenario_ind]]
mar = scenario['MeanAnnualRate']
for site_ind in range(len(site_config)):
- lnIM = scenario['GroundMotions'][site_ind][im_name]
- lnIM_mean = lnIM['Mean'][im_ind]
- lnIM_std = lnIM['TotalStdDev'][im_ind]
+ lnIM = scenario['GroundMotions'][site_ind][im_name] # noqa: N806
+ lnIM_mean = lnIM['Mean'][im_ind] # noqa: N806
+ lnIM_std = lnIM['TotalStdDev'][im_ind] # noqa: N806
p_exceed = 1 - norm.cdf(np.log(IMRange), lnIM_mean, lnIM_std)
rate_exceed = mar * p_exceed
exceedRate[:, site_ind] = exceedRate[:, site_ind] + rate_exceed
- exceedRate[exceedRate < 1e-20] = 1e-20
+ exceedRate[exceedRate < 1e-20] = 1e-20 # noqa: PLR2004
for site_ind, site in enumerate(site_config):
hc_data[site_ind] = {
'SiteID': site['ID'],
@@ -323,32 +323,32 @@ def calc_hazard_curves(IMdata, site_config, im):
return hc_data
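The aggregation in calc_hazard_curves amounts to lambda(im) = sum_j MAR_j * [1 - Phi((ln im - mu_j) / sigma_j)] over scenarios j at each site, with the return period conventionally the reciprocal of this annual exceedance rate. A compact sketch with two made-up scenarios:

import numpy as np
from scipy.stats import norm

im_range = np.power(10, np.linspace(-4, 2, 60))  # same IM grid as IMRange above
scenarios = [                                    # (MeanAnnualRate, ln-mean, ln-stddev)
    (0.010, np.log(0.2), 0.60),
    (0.002, np.log(0.5), 0.55),
]
rate = np.zeros_like(im_range)
for mar, ln_mean, ln_std in scenarios:
    rate += mar * (1 - norm.cdf(np.log(im_range), ln_mean, ln_std))
rate[rate < 1e-20] = 1e-20                       # same floor as in the code above
print(1 / rate[0], 1 / rate[-1])                 # return period grows with IM level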
-def calc_hazard_curves_hdf5(IMfile, im_list, site_config, im, scenarios):
+def calc_hazard_curves_hdf5(IMfile, im_list, site_config, im, scenarios): # noqa: N803, D103
im_ind = im_list.index(im)
- IMRange = np.power(10, np.linspace(-4, 2, 60))
- exceedRate = np.zeros((len(IMRange), len(site_config)))
+ IMRange = np.power(10, np.linspace(-4, 2, 60)) # noqa: N806
+ exceedRate = np.zeros((len(IMRange), len(site_config))) # noqa: N806
hc_data = [
{'siteID': 0, 'ReturnPeriod': list(exceedRate), 'IM': list(exceedRate)}
] * len(site_config)
scenario_idx = list(scenarios.keys())
- with h5py.File(IMfile, 'r') as IMdata:
+ with h5py.File(IMfile, 'r') as IMdata: # noqa: N806
for scenario_ind in tqdm(
range(len(scenario_idx)),
desc=f'Calculate Hazard Curves from {len(scenario_idx)} scenarios',
):
scenario_im = IMdata[str(scenario_idx[scenario_ind])]
mar = scenarios[scenario_idx[scenario_ind]]['MeanAnnualRate']
- lnIM_mean = scenario_im['Mean'][:, im_ind]
- lnIM_interStd = scenario_im['InterEvStdDev'][:, im_ind]
- lnIM_intraStd = scenario_im['IntraEvStdDev'][:, im_ind]
- lnIM_std = np.sqrt(lnIM_intraStd**2 + lnIM_interStd**2)
+ lnIM_mean = scenario_im['Mean'][:, im_ind] # noqa: N806
+ lnIM_interStd = scenario_im['InterEvStdDev'][:, im_ind] # noqa: N806
+ lnIM_intraStd = scenario_im['IntraEvStdDev'][:, im_ind] # noqa: N806
+ lnIM_std = np.sqrt(lnIM_intraStd**2 + lnIM_interStd**2) # noqa: N806
for site_ind in range(len(site_config)):
p_exceed = 1 - norm.cdf(
np.log(IMRange), lnIM_mean[site_ind], lnIM_std[site_ind]
)
rate_exceed = mar * p_exceed
exceedRate[:, site_ind] = exceedRate[:, site_ind] + rate_exceed
- exceedRate[exceedRate < 1e-20] = 1e-20
+ exceedRate[exceedRate < 1e-20] = 1e-20 # noqa: PLR2004
for site_ind, site in enumerate(site_config):
hc_data[site_ind] = {
'SiteID': site['ID'],
@@ -358,7 +358,7 @@ def calc_hazard_curves_hdf5(IMfile, im_list, site_config, im, scenarios):
return hc_data
-def get_hazard_curves(input_dir=None, input_csv=None, input_json=None):
+def get_hazard_curves(input_dir=None, input_csv=None, input_json=None): # noqa: D103
if input_dir is not None:
return None
@@ -368,7 +368,7 @@ def get_hazard_curves(input_dir=None, input_csv=None, input_json=None):
return_periods = df_hc.iloc[0, 1:].to_numpy().tolist()
hc_data = []
for i in range(num_sites):
- hc_data.append(
+ hc_data.append( # noqa: PERF401
{
'SiteID': i,
'ReturnPeriod': return_periods,
@@ -377,15 +377,15 @@ def get_hazard_curves(input_dir=None, input_csv=None, input_json=None):
)
return hc_data
- if input_json is not None:
- with open(input_json) as f:
+ if input_json is not None: # noqa: RET503
+ with open(input_json) as f: # noqa: PTH123
hc_data = json.load(f)
- return hc_data
+ return hc_data # noqa: RET504
# KZ-08/23/22: adding a function for computing exceeding probability at an im level
-def get_im_exceedance_probility(
- IMfile,
+def get_im_exceedance_probility( # noqa: C901, D103
+ IMfile, # noqa: N803
im_list,
im_type,
period,
@@ -400,7 +400,7 @@ def get_im_exceedance_probility(
# initialize output
if IMfile.lower().endswith('.json'):
- with open(IMfile) as f:
+ with open(IMfile) as f: # noqa: PTH123
im_raw = json.load(f)
num_sites = len(im_raw[scenario_idx[0]].get('GroundMotions'))
elif IMfile.lower().endswith('.hdf5'):
@@ -412,31 +412,31 @@ def get_im_exceedance_probility(
if IMfile.lower().endswith('.json'):
if im_type == 'PGA':
if 'PGA' not in im_raw[scenario_idx[0]]['IM']:
- print(
+ print( # noqa: T201
'IM_Calculator.get_im_exceedance_probility: error - IM {} does not match {}.'.format(
period, im_raw[scenario_idx[0]].get('IM')
)
)
return im_exceedance_prob
- else:
- periodID = 0
+ else: # noqa: RET505
+ periodID = 0 # noqa: N806
elif period not in im_raw[scenario_idx[0]].get('Periods'):
- print(
+ print( # noqa: T201
'IM_Calculator.get_im_exceedance_probility: error - period {} does not match {}.'.format(
period, im_raw[scenario_idx[0]].get('Periods')
)
)
return im_exceedance_prob
else:
- periodID = im_raw[scenario_idx[0]].get('Periods').index(period)
+ periodID = im_raw[scenario_idx[0]].get('Periods').index(period) # noqa: N806
# start to compute the exceedance probability
for k in range(num_scen):
- allGM = im_raw[scenario_idx[k]].get('GroundMotions')
+ allGM = im_raw[scenario_idx[k]].get('GroundMotions') # noqa: N806
for i in range(num_sites):
- curIM = allGM[i].get(f'ln{im_type}')
- curMean = curIM.get('Mean')[periodID]
- curStd = curIM.get('TotalStdDev')[periodID]
+ curIM = allGM[i].get(f'ln{im_type}') # noqa: N806
+ curMean = curIM.get('Mean')[periodID] # noqa: N806
+ curStd = curIM.get('TotalStdDev')[periodID] # noqa: N806
im_exceedance_prob[i, k, :] = 1.0 - norm.cdf(
np.log(im_level[i, :]), loc=curMean, scale=curStd
)
@@ -449,21 +449,21 @@ def get_im_exceedance_probility(
else:
im_name = f'SA({period!s})'
else:
- SystemExit(f'{im_type} is not supported in hazard downsampling')
+ SystemExit(f'{im_type} is not supported in hazard downsampling') # noqa: PLW0133
if im_name not in im_list:
- print(
+ print( # noqa: T201
f'IM_Calculator.get_im_exceedance_probility: error - intensity measure {im_name} does not match {im_list}.'
)
return im_exceedance_prob
im_ind = im_list.index(im_name)
with h5py.File(IMfile, 'r') as im_raw:
for k in range(num_scen):
- curIM = im_raw[str(scenario_idx[k])]
+ curIM = im_raw[str(scenario_idx[k])] # noqa: N806
for i in range(num_sites):
- curMean = curIM['Mean'][i, im_ind]
- curInterStd = curIM['InterEvStdDev'][i, im_ind]
- curIntraStd = curIM['IntraEvStdDev'][i, im_ind]
- curStd = np.sqrt(curInterStd**2 + curIntraStd**2)
+ curMean = curIM['Mean'][i, im_ind] # noqa: N806
+ curInterStd = curIM['InterEvStdDev'][i, im_ind] # noqa: N806
+ curIntraStd = curIM['IntraEvStdDev'][i, im_ind] # noqa: N806
+ curStd = np.sqrt(curInterStd**2 + curIntraStd**2) # noqa: N806
im_exceedance_prob[i, k, :] = 1.0 - norm.cdf(
np.log(im_level[i, :]), loc=curMean, scale=curStd
)
@@ -471,7 +471,7 @@ def get_im_exceedance_probility(
return im_exceedance_prob
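In the HDF5 branch above, the total ln-IM standard deviation is recombined from the inter- and intra-event components before the normal-CDF exceedance check. A numeric sketch with illustrative values:

import numpy as np
from scipy.stats import norm

inter_std, intra_std = 0.35, 0.55
total_std = np.sqrt(inter_std**2 + intra_std**2)     # ~0.652
ln_mean = np.log(0.3)                                # median IM of 0.3 g (made up)
p_exceed = 1.0 - norm.cdf(np.log(0.5), loc=ln_mean, scale=total_std)
print(total_std, p_exceed)                           # P(IM > 0.5 g) ~ 0.22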
-def get_im_exceedance_probability_gm(
+def get_im_exceedance_probability_gm( # noqa: D103
im_raw,
im_list,
im_type,
@@ -483,10 +483,10 @@ def get_im_exceedance_probability_gm(
for i in range(len(im_list)):
if im_type in im_list[i]:
if im_type == 'SA' and float(im_list[i].split('(')[1][:-1]) == period:
- periodID = i
+ periodID = i # noqa: N806
break
- else:
- periodID = i
+ else: # noqa: RET508
+ periodID = i # noqa: N806
# number of intensity levels
num_rps = im_level.shape[1]
@@ -500,7 +500,7 @@ def get_im_exceedance_probability_gm(
occurrence_rate = [None] * num_simu * num_scen
for i in range(num_scen):
for j in range(num_site):
- curIM = im_raw[i][j, periodID, :]
+ curIM = im_raw[i][j, periodID, :] # noqa: N806
for k in range(num_simu):
im_exceedance_prob[j, i * num_simu + k, :] = [
int(x) for x in curIM[k] > im_level[j, :]
@@ -510,7 +510,7 @@ def get_im_exceedance_probability_gm(
return im_exceedance_prob, occurrence_rate
-def sample_earthquake_occurrence(
+def sample_earthquake_occurrence( # noqa: D103
model_type,
num_target_eqs,
return_periods,
@@ -544,7 +544,7 @@ def sample_earthquake_occurrence(
# solve the optimization
om.solve_opt()
else:
- print(
+ print( # noqa: T201
f'HazardOccurrence.get_im_exceedance_probility: {model_type} is not available yet.'
)
return None
@@ -552,11 +552,11 @@ def sample_earthquake_occurrence(
return om
-def export_sampled_earthquakes(error, id_selected_eqs, eqdata, P, output_dir=None):
- probabilityWeight = [P[x] for x in id_selected_eqs]
+def export_sampled_earthquakes(error, id_selected_eqs, eqdata, P, output_dir=None): # noqa: N803, D103
+ probabilityWeight = [P[x] for x in id_selected_eqs] # noqa: N806
selected_eqs = []
for i in id_selected_eqs:
- selected_eqs.append(eqdata[i])
+ selected_eqs.append(eqdata[i]) # noqa: PERF401
dict_selected_eqs = {
'EarthquakeNumber': len(id_selected_eqs),
'EarthquakeID': id_selected_eqs,
@@ -566,7 +566,7 @@ def export_sampled_earthquakes(error, id_selected_eqs, eqdata, P, output_dir=Non
}
if output_dir is not None:
- with open(os.path.join(output_dir, 'RupSampled.json'), 'w') as f:
+ with open(os.path.join(output_dir, 'RupSampled.json'), 'w') as f: # noqa: PTH118, PTH123
json.dump(dict_selected_eqs, f, indent=2)
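
The `PTH118`/`PTH123` suppressions on the write above (repeated in both `export_sampled_gmms` methods further down) mark spots where Ruff would rather see `pathlib`. A sketch of the equivalent form if the suppressions are ever traded for a refactor; `write_sampled_earthquakes` is a hypothetical name:

import json
from pathlib import Path

def write_sampled_earthquakes(dict_selected_eqs, output_dir=None):
    """Hypothetical pathlib version of the export above."""
    if output_dir is not None:
        out_path = Path(output_dir) / 'RupSampled.json'  # replaces os.path.join (PTH118)
        with out_path.open('w') as f:  # replaces the builtin open() (PTH123)
            json.dump(dict_selected_eqs, f, indent=2)
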
@@ -612,13 +612,13 @@ def export_sampled_earthquakes(error, id_selected_eqs, eqdata, P, output_dir=Non
# json.dump(dict_selected_eqs, f, indent=2)
-class OccurrenceModel_ManzourDavidson2016:
+class OccurrenceModel_ManzourDavidson2016: # noqa: D101
def __init__(
self,
- return_periods=[],
- im_exceedance_probs=[],
+ return_periods=[], # noqa: B006
+ im_exceedance_probs=[], # noqa: B006
num_scenarios=-1,
- reweight_only=False,
+ reweight_only=False, # noqa: FBT002
occurence_rate_origin=None,
):
"""__init__: initialization a hazard occurrence optimizer
@@ -626,7 +626,7 @@ def __init__(
:param earthquake_mafs: 1-D array of annual occurrence probability, MAF(j)
:param im_exceedance_probs: 3-D array of exceedance probability of Sa, EP(i,j,r) for site #i, earthquake #j, return period #r
:param num_scenarios: integer for number of target scenarios
- """
+ """ # noqa: D205, D400
# read input parameters
self.return_periods = return_periods
self.im_exceedance_probs = im_exceedance_probs
@@ -637,63 +637,63 @@ def __init__(
# check input parameters
self.input_valid = self._input_check()
if not self.input_valid:
- print(
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016.__init__: at least one input parameter is invalid.'
)
return
def _input_check(self):
- """_input_check: check of input parameters"""
+ """_input_check: check of input parameters""" # noqa: D400
# number of return periods
if len(self.return_periods) > 0:
self.num_return_periods = len(self.return_periods)
- print(
+ print( # noqa: T201
f'OccurrenceModel_ManzourDavidson2016._input_check: number of return periods = {self.num_return_periods}.'
)
else:
- print(
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: no return period is defined.'
)
return False
# shape of exceedance probability
- if len(self.im_exceedance_probs.shape) != 3:
- print(
+ if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: exceedance probability array should be 3-D.'
)
return False
- elif self.im_exceedance_probs.shape[-1] != len(self.return_periods):
- print(
+ elif self.im_exceedance_probs.shape[-1] != len(self.return_periods): # noqa: RET505
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: exceedance probability array should have dimensions of (#site, #eq, #return_period).'
)
return False
else:
self.num_sites = self.im_exceedance_probs.shape[0]
- print(
+ print( # noqa: T201
f'OccurrenceModel_ManzourDavidson2016._input_check: number of sites = {self.num_sites}.'
)
# number of target scenarios
if self.num_scenarios <= 0:
- print(
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: number of target scenarios should be positive.'
)
return False
- else:
+ else: # noqa: RET505
# initialize outputs
init_flag = False
init_flag = self._opt_initialization()
if init_flag:
- print(
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: initialization completed.'
)
return True
- else:
- print(
+ else: # noqa: RET505
+ print( # noqa: T201
'OccurrenceModel_ManzourDavidson2016._input_check: initialization errors.'
)
return False
def _opt_initialization(self):
- """_opt_initialization: initialization of optimization problem"""
+ """_opt_initialization: initialization of optimization problem""" # noqa: D400
# the problem is mixed integer program
self.prob = pulp.LpProblem('MIP', pulp.LpMinimize)
@@ -762,21 +762,21 @@ def _opt_initialization(self):
def solve_opt(self):
"""target_function: compute the target function to be minimized
:param X: 2-D array of annual occurrence probability of earthquakes and corresponding binary variables (many values are reduced to zeros)
- """
+ """ # noqa: D205, D400
maximum_runtime = 1 * 60 * 60 # 1 hour maximum
self.prob.solve(pulp.PULP_CBC_CMD(timeLimit=maximum_runtime, gapRel=0.001))
- print('Status:', pulp.LpStatus[self.prob.status])
+ print('Status:', pulp.LpStatus[self.prob.status]) # noqa: T201
- def get_selected_earthquake(self):
- P_selected = [self.P[i].varValue for i in range(self.num_eqs)]
+ def get_selected_earthquake(self): # noqa: D102
+ P_selected = [self.P[i].varValue for i in range(self.num_eqs)] # noqa: N806
if self.reweight_only:
- Z_selected = [1 for i in range(self.num_eqs)]
+ Z_selected = [1 for i in range(self.num_eqs)] # noqa: N806
else:
- Z_selected = [self.Z[i].varValue for i in range(self.num_eqs)]
+ Z_selected = [self.Z[i].varValue for i in range(self.num_eqs)] # noqa: N806
return P_selected, Z_selected
- def get_error_vector(self):
+ def get_error_vector(self): # noqa: D102
e_plus_selected = np.zeros([self.num_sites, self.num_return_periods])
e_minus_selected = np.zeros([self.num_sites, self.num_return_periods])
for i in range(self.num_sites):
@@ -786,13 +786,13 @@ def get_error_vector(self):
error = ((e_plus_selected - e_minus_selected) ** 2).sum(
axis=1
) / self.num_return_periods
- return error
+ return error # noqa: RET504
- def export_sampled_gmms(
+ def export_sampled_gmms( # noqa: D102
self,
id_selected_gmms,
id_selected_scens,
- P,
+ P, # noqa: N803
output_dir=None,
):
dict_selected_gmms = {
@@ -802,17 +802,17 @@ def export_sampled_gmms(
}
if output_dir is not None:
- with open(os.path.join(output_dir, 'InfoSampledGM.json'), 'w') as f:
+ with open(os.path.join(output_dir, 'InfoSampledGM.json'), 'w') as f: # noqa: PTH118, PTH123
json.dump(dict_selected_gmms, f, indent=2)
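
For readers tracing `OccurrenceModel_ManzourDavidson2016`, the sketch below reproduces the core of the Manzour & Davidson (2016) formulation that `_opt_initialization` and `solve_opt` assemble: continuous annual rates `P`, binary selectors `Z`, positive/negative slack for the hazard-curve mismatch at each site and return period, and a cap on the number of retained events. The toy data and bounds are illustrative, not the class's internals:

import numpy as np
import pulp

rng = np.random.default_rng(0)
num_sites, num_eqs, num_rps = 2, 6, 3
ep = rng.uniform(0, 0.1, (num_sites, num_eqs, num_rps))  # EP(i, j, r)
return_periods = [100.0, 500.0, 1000.0]
num_target = 3  # events to keep

prob = pulp.LpProblem('MIP', pulp.LpMinimize)
P = [pulp.LpVariable(f'P_{j}', lowBound=0) for j in range(num_eqs)]
Z = [pulp.LpVariable(f'Z_{j}', cat='Binary') for j in range(num_eqs)]
e_plus = [[pulp.LpVariable(f'ep_{i}_{r}', lowBound=0) for r in range(num_rps)] for i in range(num_sites)]
e_minus = [[pulp.LpVariable(f'em_{i}_{r}', lowBound=0) for r in range(num_rps)] for i in range(num_sites)]

# Objective: return-period-weighted sum of absolute hazard-curve errors
prob += pulp.lpSum(return_periods[r] * (e_plus[i][r] + e_minus[i][r]) for i in range(num_sites) for r in range(num_rps))

# Match the target annual exceedance rate 1/RP at every (site, return period)
for i in range(num_sites):
    for r in range(num_rps):
        prob += (pulp.lpSum(float(ep[i, j, r]) * P[j] for j in range(num_eqs)) + e_minus[i][r] - e_plus[i][r] == 1.0 / return_periods[r])

# Keep at most num_target events; a rate may be nonzero only if selected
prob += pulp.lpSum(Z) <= num_target
for j in range(num_eqs):
    prob += P[j] <= 1.0 * Z[j]  # 1.0 as a loose upper bound on an annual rate

prob.solve(pulp.PULP_CBC_CMD(msg=0))
print([p.varValue for p in P], [z.varValue for z in Z])
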
-class OccurrenceModel_Wangetal2023:
+class OccurrenceModel_Wangetal2023: # noqa: D101
def __init__(
self,
- return_periods=[],
- im_exceedance_probs=[],
+ return_periods=[], # noqa: B006
+ im_exceedance_probs=[], # noqa: B006
num_scenarios=-1,
- reweight_only=False,
+ reweight_only=False, # noqa: FBT002
occurence_rate_origin=None,
hzo_config=None,
):
@@ -821,7 +821,7 @@ def __init__(
:param earthquake_mafs: 1-D array of annual occurrence probability, MAF(j)
:param im_exceedance_probs: 3-D array of exceedance probability of Sa, EP(i,j,r) for site #i, earthquake #j, return period #r
:param num_scenarios: integer for number of target scenarios
- """
+ """ # noqa: D205, D400
# read input parameters
self.return_periods = return_periods
self.im_exceedance_probs = im_exceedance_probs
@@ -836,63 +836,63 @@ def __init__(
# check input parameters
self.input_valid = self._input_check()
if not self.input_valid:
- print(
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023.__init__: at least one input parameter is invalid.'
)
return
def _input_check(self):
- """_input_check: check of input parameters"""
+ """_input_check: check of input parameters""" # noqa: D400
# number of return periods
if len(self.return_periods) > 0:
self.num_return_periods = len(self.return_periods)
- print(
+ print( # noqa: T201
f'OccurrenceModel_Wangetal2023._input_check: number of return periods = {self.num_return_periods}.'
)
else:
- print(
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: no return period is defined.'
)
return False
# shape of exceedance probability
- if len(self.im_exceedance_probs.shape) != 3:
- print(
+ if len(self.im_exceedance_probs.shape) != 3: # noqa: PLR2004
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: exceedance probability array should be 3-D.'
)
return False
- elif self.im_exceedance_probs.shape[-1] != len(self.return_periods):
- print(
+ elif self.im_exceedance_probs.shape[-1] != len(self.return_periods): # noqa: RET505
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: exceedance probability array should have dimensions of (#site, #eq, #return_period).'
)
return False
else:
self.num_sites = self.im_exceedance_probs.shape[0]
- print(
+ print( # noqa: T201
f'OccurrenceModel_Wangetal2023._input_check: number of sites = {self.num_sites}.'
)
# number of target scenarios
if self.num_scenarios <= 0:
- print(
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: number of target scenarios should be positive.'
)
return False
- else:
+ else: # noqa: RET505
# initialize outputs
init_flag = False
init_flag = self._opt_initialization()
if init_flag:
- print(
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: initialization completed.'
)
return True
- else:
- print(
+ else: # noqa: RET505
+ print( # noqa: T201
'OccurrenceModel_Wangetal2023._input_check: initialization errors.'
)
return False
def _opt_initialization(self):
- """_opt_initialization: initialization of LASSO regression"""
+ """_opt_initialization: initialization of LASSO regression""" # noqa: D400
# define X
self.X_P = (
self.im_exceedance_probs.transpose(1, 0, 2)
@@ -919,7 +919,7 @@ def _opt_initialization(self):
return True
def solve_opt(self):
- """LASSO regression"""
+ """LASSO regression""" # noqa: D400
if self.alpha_path:
self.alphas, self.coefs, _ = lasso_path(
X=self.X_weighted,
@@ -939,7 +939,7 @@ def solve_opt(self):
# re-regression may be needed here !!!
- def get_selected_earthquake(self):
+ def get_selected_earthquake(self): # noqa: D102
# calculate the number of selected events for each step
self.num_selected = [
sum(x > 0 for x in self.coefs[:, i]) for i in range(self.coefs.shape[1])
@@ -955,7 +955,7 @@ def get_selected_earthquake(self):
if self.num_selected[self.selected_alpha_ind] == 0:
sys.exit(
- 'ERROR: Zero scenarios/ground motions are selected in Wang et al. (2023).\n'
+ 'ERROR: Zero scenarios/ground motions are selected in Wang et al. (2023).\n' # noqa: ISC003
+ f'The tuning parameter used is {self.alphas[self.selected_alpha_ind]}.\n'
+ 'Try using a smaller tuning parameter.'
)
@@ -965,18 +965,18 @@ def get_selected_earthquake(self):
self.Z_selected = self.coefs[:, self.selected_alpha_ind] > 0
return self.Rate_selected, self.Z_selected
- def get_error_vector(self):
+ def get_error_vector(self): # noqa: D102
# self.e_selected = self.y - np.dot(self.X, self.coefs[:,self.selected_alpha_ind])
error = self.y - self.X.sum(axis=1)
error = error.reshape(self.num_sites, self.num_return_periods)
error = (error**2).sum(axis=1) / self.num_return_periods
- return error
+ return error # noqa: RET504
- def export_sampled_gmms(
+ def export_sampled_gmms( # noqa: D102
self,
id_selected_gmms,
id_selected_scens,
- P,
+ P, # noqa: N803
output_dir=None,
):
dict_selected_gmms = {
@@ -987,5 +987,5 @@ def export_sampled_gmms(
}
if output_dir is not None:
- with open(os.path.join(output_dir, 'InfoSampledGM.json'), 'w') as f:
+ with open(os.path.join(output_dir, 'InfoSampledGM.json'), 'w') as f: # noqa: PTH118, PTH123
json.dump(dict_selected_gmms, f, indent=2)
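
`OccurrenceModel_Wangetal2023` swaps the MIP above for a LASSO regression: the design matrix stacks each event's exceedance probabilities, the response is the target hazard curve, and the L1 penalty drives most event rates to zero. A hedged sketch of that idea with scikit-learn's `lasso_path` (toy data; the class's weighting and alpha selection are more involved):

import numpy as np
from sklearn.linear_model import lasso_path

rng = np.random.default_rng(1)
num_sites, num_eqs, num_rps = 3, 20, 4
ep = rng.uniform(0, 0.05, (num_sites, num_eqs, num_rps))
return_periods = np.array([100.0, 250.0, 500.0, 1000.0])

# X: one column per event, rows flatten (site, return period); y: target rates
X = ep.transpose(1, 0, 2).reshape(num_eqs, -1).T  # shape (sites * rps, eqs)
y = np.tile(1.0 / return_periods, num_sites)

# positive=True keeps the fitted occurrence rates non-negative
alphas, coefs, _ = lasso_path(X, y, positive=True)

# Larger alpha -> fewer retained events; pick the step nearest a target count
num_selected = (coefs > 0).sum(axis=0)
step = int(np.argmin(np.abs(num_selected - 5)))
print(alphas[step], num_selected[step])
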
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py
index e71911b91..2fe6f8bac 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -52,23 +52,23 @@
R2D = True
-def site_job(hazard_info):
+def site_job(hazard_info): # noqa: C901, D103
# Sites and stations
- print('HazardSimulation: creating stations.')
+ print('HazardSimulation: creating stations.') # noqa: T201
site_info = hazard_info['Site']
if site_info['Type'] == 'From_CSV':
- input_file = os.path.join(input_dir, site_info['input_file'])
+ input_file = os.path.join(input_dir, site_info['input_file']) # noqa: PTH118
output_file = site_info.get('output_file', False)
if output_file:
- output_file = os.path.join(output_dir, output_file)
- min_ID = site_info['min_ID']
- max_ID = site_info['max_ID']
+ output_file = os.path.join(output_dir, output_file) # noqa: PTH118
+ min_ID = site_info['min_ID'] # noqa: N806
+ max_ID = site_info['max_ID'] # noqa: N806
# forward compatibility
if minID:
- min_ID = minID
+ min_ID = minID # noqa: N806
site_info['min_ID'] = minID
if maxID:
- max_ID = maxID
+ max_ID = maxID # noqa: N806
site_info['max_ID'] = maxID
# Creating stations from the csv input file
z1_tag = 0
@@ -83,11 +83,11 @@ def site_job(hazard_info):
else:
vs30_tag = 0
# Bedrock depth
- zTR_tag = 0
+ zTR_tag = 0 # noqa: N806
if 'SoilGrid250' in site_info['BedrockDepth']['Type']:
- zTR_tag = 0
+ zTR_tag = 0 # noqa: N806
elif 'National Crustal Model' in site_info['BedrockDepth']['Type']:
- zTR_tag = 1
+ zTR_tag = 1 # noqa: N806
# soil model if any
if site_info.get('SoilModel', None) is not None:
soil_model_type = site_info['SoilModel'].get('Type', 'EI')
@@ -98,9 +98,9 @@ def site_job(hazard_info):
if soil_model_type == 'User':
soil_user_fun = site_info['SoilModel'].get('Parameters', None)
if soil_user_fun is not None:
- soil_user_fun = os.path.join(input_dir, soil_user_fun)
+ soil_user_fun = os.path.join(input_dir, soil_user_fun) # noqa: PTH118
# Creating stations from the csv input file
- stations = create_stations(
+ stations = create_stations( # noqa: F405
input_file,
output_file,
min_ID,
@@ -114,29 +114,29 @@ def site_job(hazard_info):
soil_user_fun=soil_user_fun,
)
if stations:
- print(f'HazardSimulation: site data are fetched and saved in {output_file}.')
+ print(f'HazardSimulation: site data are fetched and saved in {output_file}.') # noqa: T201
else:
- print(
+ print( # noqa: T201
'HazardSimulation: please check the "Input" directory in the configuration json file.'
)
- exit()
+ exit() # noqa: PLR1722
-def hazard_job(hazard_info):
+def hazard_job(hazard_info): # noqa: C901, D103, PLR0915
# Sites and stations
- print('HazardSimulation: creating stations.')
+ print('HazardSimulation: creating stations.') # noqa: T201
site_info = hazard_info['Site']
if site_info['Type'] == 'From_CSV':
- input_file = os.path.join(input_dir, site_info['input_file'])
+ input_file = os.path.join(input_dir, site_info['input_file']) # noqa: PTH118
output_file = site_info.get('output_file', False)
if output_file:
- output_file = os.path.join(input_dir, output_file)
- min_ID = site_info.get('min_ID', None)
- max_ID = site_info.get('max_ID', None)
- filterIDs = site_info.get('filterIDs', None)
+ output_file = os.path.join(input_dir, output_file) # noqa: PTH118
+ min_ID = site_info.get('min_ID', None) # noqa: N806
+ max_ID = site_info.get('max_ID', None) # noqa: N806
+ filterIDs = site_info.get('filterIDs', None) # noqa: N806
# backward compatibility. Delete after new frontend releases
if min_ID is not None and max_ID is not None:
- filterIDs = str(min_ID) + '-' + str(max_ID)
+ filterIDs = str(min_ID) + '-' + str(max_ID) # noqa: N806
# Creating stations from the csv input file
z1_tag = 0
z25_tag = 0
@@ -152,20 +152,20 @@ def hazard_job(hazard_info):
else:
vs30_tag = 0
# Creating stations from the csv input file
- stations = create_stations(
+ stations = create_stations( # noqa: F405
input_file, output_file, filterIDs, vs30_tag, z1_tag, z25_tag
)
if stations:
- print('HazardSimulation: stations created.')
+ print('HazardSimulation: stations created.') # noqa: T201
else:
- print(
+ print( # noqa: T201
'HazardSimulation: please check the "Input" directory in the configuration json file.'
)
- exit()
+ exit() # noqa: PLR1722
# print(stations)
# Scenarios
- print('HazardSimulation: creating scenarios.')
+ print('HazardSimulation: creating scenarios.') # noqa: T201
scenario_info = hazard_info['Scenario']
if scenario_info['Type'] == 'Earthquake':
# KZ-10/31/2022: checking user-provided scenarios
@@ -173,28 +173,28 @@ def hazard_job(hazard_info):
'UserScenarioFile', False
)
if user_scenarios:
- scenarios = load_earthquake_scenarios(scenario_info, stations, dir_info)
+ scenarios = load_earthquake_scenarios(scenario_info, stations, dir_info) # noqa: F405
# Creating earthquake scenarios
elif scenario_info['EqRupture']['Type'] in ['PointSource', 'ERF']:
- scenarios = create_earthquake_scenarios(
+ scenarios = create_earthquake_scenarios( # noqa: F405
scenario_info, stations, dir_info
)
elif scenario_info['Type'] == 'Wind':
# Creating wind scenarios
- scenarios = create_wind_scenarios(scenario_info, stations, input_dir)
+ scenarios = create_wind_scenarios(scenario_info, stations, input_dir) # noqa: F405
else:
- # TODO: extending this to other hazards
- print('HazardSimulation: currently only supports EQ and Wind simulations.')
+ # TODO: extend this to other hazards # noqa: TD002
+ print('HazardSimulation: currently only supports EQ and Wind simulations.') # noqa: T201
# print(scenarios)
- print('HazardSimulation: scenarios created.')
+ print('HazardSimulation: scenarios created.') # noqa: T201
# Computing intensity measures
- print('HazardSimulation: computing intensity measures.')
+ print('HazardSimulation: computing intensity measures.') # noqa: T201
if scenario_info['Type'] == 'Earthquake':
# Computing uncorrelated Sa
event_info = hazard_info['Event']
if opensha_flag:
- im_raw, im_info = compute_im(
+ im_raw, im_info = compute_im( # noqa: F405
scenarios,
stations['Stations'],
event_info['GMPE'],
@@ -207,7 +207,7 @@ def hazard_job(hazard_info):
event_info['IntensityMeasure'] = im_info
elif oq_flag:
# Preparing config ini for OpenQuake
- filePath_ini, oq_ver_loaded, event_info = openquake_config(
+ filePath_ini, oq_ver_loaded, event_info = openquake_config( # noqa: N806, F405
site_info, scenario_info, event_info, dir_info
)
if not filePath_ini:
@@ -222,7 +222,7 @@ def hazard_job(hazard_info):
]:
# Calling openquake to run classical PSHA
# oq_version = scenario_info['EqRupture'].get('OQVersion',default_oq_version)
- oq_run_flag = oq_run_classical_psha(
+ oq_run_flag = oq_run_classical_psha( # noqa: F405
filePath_ini,
exports='csv',
oq_version=oq_ver_loaded,
@@ -234,15 +234,15 @@ def hazard_job(hazard_info):
err_msg = (
err_msg
+ ' Please check whether leaked Python threads in the background are still occupying {}.'.format(
- os.path.expanduser('~/oqdata/db.sqlite3')
+ os.path.expanduser('~/oqdata/db.sqlite3') # noqa: PTH111
)
)
- print(err_msg)
+ print(err_msg) # noqa: T201
sys.exit(err_msg)
else:
- print('HazardSimulation: OpenQuake Classical PSHA completed.')
+ print('HazardSimulation: OpenQuake Classical PSHA completed.') # noqa: T201
if scenario_info['EqRupture'].get('UHS', False):
- ln_im_mr, mag_maf, im_list = oq_read_uhs_classical_psha(
+ ln_im_mr, mag_maf, im_list = oq_read_uhs_classical_psha( # noqa: F405
scenario_info, event_info, dir_info
)
else:
@@ -253,13 +253,13 @@ def hazard_job(hazard_info):
elif scenario_info['EqRupture']['Type'] == 'OpenQuakeScenario':
# Creating and conducting OpenQuake calculations
- oq_calc = OpenQuakeHazardCalc(
+ oq_calc = OpenQuakeHazardCalc( # noqa: F405
filePath_ini, event_info, oq_ver_loaded, dir_info=dir_info
)
oq_calc.run_calc()
im_raw = [oq_calc.eval_calc()]
# stn_new = stations['Stations']
- print('HazardSimulation: OpenQuake Scenario calculation completed.')
+ print('HazardSimulation: OpenQuake Scenario calculation completed.') # noqa: T201
else:
sys.exit(
@@ -279,7 +279,7 @@ def hazard_job(hazard_info):
reweight_only = occurrence_info.get('ReweightOnly', False)
# KZ-10/31/22: adding a flag for whether to re-sample ground motion maps or just monte-carlo
sampling_gmms = occurrence_info.get('SamplingGMMs', True)
- occ_dict = configure_hazard_occurrence(
+ occ_dict = configure_hazard_occurrence( # noqa: F405
input_dir,
output_dir,
hzo_config=occurrence_info,
@@ -294,7 +294,7 @@ def hazard_job(hazard_info):
period = occ_dict.get('Period')
hc_curves = occ_dict.get('HazardCurves')
# get im exceedance probabilities
- im_exceedance_prob = get_im_exceedance_probility(
+ im_exceedance_prob = get_im_exceedance_probility( # noqa: F405
im_raw, im_type, period, hc_curves
)
# sample the earthquake scenario occurrence
@@ -304,7 +304,7 @@ def hazard_job(hazard_info):
]
else:
occurrence_rate_origin = None
- occurrence_model = sample_earthquake_occurrence(
+ occurrence_model = sample_earthquake_occurrence( # noqa: F405
model_type,
num_target_eqs,
return_periods,
@@ -313,64 +313,64 @@ def hazard_job(hazard_info):
occurrence_rate_origin,
)
# print(occurrence_model)
- P, Z = occurrence_model.get_selected_earthquake()
+ P, Z = occurrence_model.get_selected_earthquake() # noqa: N806
# now update the im_raw with selected eqs with Z > 0
id_selected_eqs = []
for i in range(len(Z)):
if P[i] > 0:
- id_selected_eqs.append(i)
+ id_selected_eqs.append(i) # noqa: PERF401
im_raw_sampled = [im_raw[i] for i in id_selected_eqs]
im_raw = im_raw_sampled
num_per_eq_avg = int(np.ceil(num_target_gmms / len(id_selected_eqs)))
# export sampled earthquakes
- _ = export_sampled_earthquakes(id_selected_eqs, scenarios, P, output_dir)
+ _ = export_sampled_earthquakes(id_selected_eqs, scenarios, P, output_dir) # noqa: F405
# Updating station information
# stations['Stations'] = stn_new
- print('HazardSimulation: uncorrelated response spectra computed.')
+ print('HazardSimulation: uncorrelated response spectra computed.') # noqa: T201
# print(im_raw)
# KZ-08/23/22: adding method to do hazard occurrence model
if occurrence_sampling and sampling_gmms:
num_gm_per_site = num_per_eq_avg
else:
num_gm_per_site = event_info['NumberPerSite']
- print('num_gm_per_site = ', num_gm_per_site)
+ print('num_gm_per_site = ', num_gm_per_site) # noqa: T201
if scenario_info['EqRupture']['Type'] not in [
'OpenQuakeClassicalPSHA',
'OpenQuakeUserConfig',
'OpenQuakeClassicalPSHA-User',
]:
# Computing correlated IMs
- ln_im_mr, mag_maf, im_list = simulate_ground_motion(
+ ln_im_mr, mag_maf, im_list = simulate_ground_motion( # noqa: F405
stations['Stations'],
im_raw,
num_gm_per_site,
event_info['CorrelationModel'],
event_info['IntensityMeasure'],
)
- print('HazardSimulation: correlated response spectra computed.')
+ print('HazardSimulation: correlated response spectra computed.') # noqa: T201
# KZ-08/23/22: adding method to do hazard occurrence model
if occurrence_sampling and sampling_gmms:
# get im exceedance probabilities for individual ground motions
# print('im_list = ',im_list)
- im_exceedance_prob_gmm = get_im_exceedance_probability_gm(
+ im_exceedance_prob_gmm = get_im_exceedance_probability_gm( # noqa: F405
np.exp(ln_im_mr), im_list, im_type, period, hc_curves
)
# sample the earthquake scenario occurrence
- occurrence_model_gmm = sample_earthquake_occurrence(
+ occurrence_model_gmm = sample_earthquake_occurrence( # noqa: F405
model_type, num_target_gmms, return_periods, im_exceedance_prob_gmm
)
# print(occurrence_model)
- P_gmm, Z_gmm = occurrence_model_gmm.get_selected_earthquake()
+ P_gmm, Z_gmm = occurrence_model_gmm.get_selected_earthquake() # noqa: N806
# now update the im_raw with selected eqs with Z > 0
id_selected_gmms = []
for i in range(len(Z_gmm)):
if P_gmm[i] > 0:
- id_selected_gmms.append(i)
+ id_selected_gmms.append(i) # noqa: PERF401
id_selected_scens = [int(x / num_gm_per_site) for x in id_selected_gmms]
id_selected_simus = [x % num_gm_per_site for x in id_selected_gmms]
# export sampled earthquakes
- _ = export_sampled_gmms(
+ _ = export_sampled_gmms( # noqa: F405
id_selected_gmms, id_selected_scens, P_gmm, output_dir
)
num_site = ln_im_mr[0].shape[0]
@@ -387,8 +387,8 @@ def hazard_job(hazard_info):
mag_maf = [[0, 0, 0, 0]]
if event_info['SaveIM'] and ln_im_mr:
- print('HazardSimulation: saving simulated intensity measures.')
- _ = export_im(
+ print('HazardSimulation: saving simulated intensity measures.') # noqa: T201
+ _ = export_im( # noqa: F405
stations['Stations'],
im_list,
ln_im_mr,
@@ -397,25 +397,25 @@ def hazard_job(hazard_info):
'SiteIM.json',
1,
)
- print('HazardSimulation: simulated intensity measures saved.')
+ print('HazardSimulation: simulated intensity measures saved.') # noqa: T201
else:
- print('HazardSimulation: IM is not required to saved or no IM is found.')
+ print('HazardSimulation: IM is not required to be saved or no IM is found.') # noqa: T201
# print(np.exp(ln_im_mr[0][0, :, 1]))
# print(np.exp(ln_im_mr[0][1, :, 1]))
else:
- # TODO: extending this to other hazards
- print('HazardSimulation currently only supports earthquake simulations.')
- print('HazardSimulation: intensity measures computed.')
+ # TODO: extend this to other hazards # noqa: TD002
+ print('HazardSimulation: currently only supports earthquake simulations.') # noqa: T201
+ print('HazardSimulation: intensity measures computed.') # noqa: T201
# Selecting ground motion records
if scenario_info['Type'] == 'Earthquake':
# Selecting records
data_source = event_info.get('Database', 0)
if data_source:
- print('HazardSimulation: selecting ground motion records.')
+ print('HazardSimulation: selecting ground motion records.') # noqa: T201
sf_max = event_info['ScalingFactor']['Maximum']
sf_min = event_info['ScalingFactor']['Minimum']
start_time = time.time()
- gm_id, gm_file = select_ground_motion(
+ gm_id, gm_file = select_ground_motion( # noqa: F405
im_list,
ln_im_mr,
data_source,
@@ -425,17 +425,17 @@ def hazard_job(hazard_info):
'EventGrid.csv',
stations['Stations'],
)
- print(
+ print( # noqa: T201
f'HazardSimulation: ground motion records selected ({time.time() - start_time} s).'
)
# print(gm_id)
gm_id = [int(i) for i in np.unique(gm_id)]
- gm_file = [i for i in np.unique(gm_file)]
- runtag = output_all_ground_motion_info(
+ gm_file = [i for i in np.unique(gm_file)] # noqa: C416
+ runtag = output_all_ground_motion_info( # noqa: F405
gm_id, gm_file, output_dir, 'RecordsList.csv'
)
if runtag:
- print('HazardSimulation: the ground motion list saved.')
+ print('HazardSimulation: ground motion list saved.') # noqa: T201
else:
sys.exit(
'HazardSimulation: warning - issues with saving the ground motion list.'
@@ -444,26 +444,26 @@ def hazard_job(hazard_info):
user_name = event_info.get('UserName', None)
user_password = event_info.get('UserPassword', None)
if (user_name is not None) and (user_password is not None) and (not R2D):
- print('HazardSimulation: downloading ground motion records.')
- raw_dir = download_ground_motion(
+ print('HazardSimulation: downloading ground motion records.') # noqa: T201
+ raw_dir = download_ground_motion( # noqa: F405
gm_id, user_name, user_password, output_dir
)
if raw_dir:
- print('HazardSimulation: ground motion records downloaded.')
+ print('HazardSimulation: ground motion records downloaded.') # noqa: T201
# Parsing records
- print('HazardSimulation: parsing records.')
- record_dir = parse_record(
+ print('HazardSimulation: parsing records.') # noqa: T201
+ record_dir = parse_record( # noqa: F405, F841
gm_file,
raw_dir,
output_dir,
event_info['Database'],
event_info['OutputFormat'],
)
- print('HazardSimulation: records parsed.')
+ print('HazardSimulation: records parsed.') # noqa: T201
else:
- print('HazardSimulation: No records to be parsed.')
+ print('HazardSimulation: No records to be parsed.') # noqa: T201
else:
- print('HazardSimulation: ground motion selection is not requested.')
+ print('HazardSimulation: ground motion selection is not requested.') # noqa: T201
if __name__ == '__main__':
@@ -478,7 +478,7 @@ def hazard_job(hazard_info):
args = parser.parse_args()
# read the hazard configuration file
- with open(args.hazard_config) as f:
+ with open(args.hazard_config) as f: # noqa: PTH123
hazard_info = json.load(f)
# directory (back compatibility here)
@@ -494,20 +494,20 @@ def hazard_job(hazard_info):
dir_info['Output'] = output_dir
dir_info['Work'] = output_dir
try:
- os.mkdir(f'{output_dir}')
- except:
- print('HazardSimulation: output folder already exists.')
+ os.mkdir(f'{output_dir}') # noqa: PTH102
+ except: # noqa: E722
+ print('HazardSimulation: output folder already exists.') # noqa: T201
# site filter (if explicitly defined)
- minID = None
- maxID = None
+ minID = None # noqa: N816
+ maxID = None # noqa: N816
if args.filter:
tmp = [int(x) for x in args.filter.split('-')]
if len(tmp) == 1:
- minID = tmp[0]
- maxID = minID
+ minID = tmp[0] # noqa: N816
+ maxID = minID # noqa: N816
else:
- [minID, maxID] = tmp
+ [minID, maxID] = tmp # noqa: N816
# parse job type for set up environment and constants
try:
@@ -515,11 +515,11 @@ def hazard_job(hazard_info):
'PointSource',
'ERF',
]
- except:
+ except: # noqa: E722
opensha_flag = False
try:
oq_flag = 'OpenQuake' in hazard_info['Scenario']['EqRupture']['Type']
- except:
+ except: # noqa: E722
oq_flag = False
# dependencies
@@ -529,59 +529,59 @@ def hazard_job(hazard_info):
packages = ['selenium', 'tqdm', 'psutil', 'PuLP', 'requests']
for p in packages:
if importlib.util.find_spec(p) is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p]) # noqa: S603
# set up environment
import socket
if 'stampede2' not in socket.gethostname():
if importlib.util.find_spec('jpype') is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1'])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603
import jpype
- from jpype.types import *
+ from jpype.types import * # noqa: F403
memory_total = psutil.virtual_memory().total / (1024.0**3)
memory_request = int(memory_total * 0.75)
jpype.addClassPath('./lib/OpenSHA-1.5.2.jar')
try:
jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False)
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
f'StartJVM of ./lib/OpenSHA-1.5.2.jar with {memory_request} GB memory failed. Try again after releasing some memory.'
)
if oq_flag:
# clear up old db.sqlite3 if any
- if os.path.isfile(os.path.expanduser('~/oqdata/db.sqlite3')):
+ if os.path.isfile(os.path.expanduser('~/oqdata/db.sqlite3')): # noqa: PTH111, PTH113
new_db_sqlite3 = True
try:
- os.remove(os.path.expanduser('~/oqdata/db.sqlite3'))
- except:
+ os.remove(os.path.expanduser('~/oqdata/db.sqlite3')) # noqa: PTH107, PTH111
+ except: # noqa: E722
new_db_sqlite3 = False
# data dir
- os.environ['OQ_DATADIR'] = os.path.join(
- os.path.abspath(output_dir),
+ os.environ['OQ_DATADIR'] = os.path.join( # noqa: PTH118
+ os.path.abspath(output_dir), # noqa: PTH100
'oqdata',
)
- print('HazardSimulation: local OQ_DATADIR = ' + os.environ.get('OQ_DATADIR'))
- if os.path.exists(os.environ.get('OQ_DATADIR')):
- print(
+ print('HazardSimulation: local OQ_DATADIR = ' + os.environ.get('OQ_DATADIR')) # noqa: T201
+ if os.path.exists(os.environ.get('OQ_DATADIR')): # noqa: PTH110
+ print( # noqa: T201
'HazardSimulation: local OQ folder already exists, overwriting it now...'
)
shutil.rmtree(os.environ.get('OQ_DATADIR'))
- os.makedirs(f"{os.environ.get('OQ_DATADIR')}")
+ os.makedirs(f"{os.environ.get('OQ_DATADIR')}") # noqa: PTH103
# import modules
- from ComputeIntensityMeasure import *
- from CreateScenario import *
- from CreateStation import *
+ from ComputeIntensityMeasure import * # noqa: F403
+ from CreateScenario import * # noqa: F403
+ from CreateStation import * # noqa: F403
# KZ-08/23/22: adding hazard occurrence model
- from HazardOccurrence import *
- from SelectGroundMotion import *
+ from HazardOccurrence import * # noqa: F403
+ from SelectGroundMotion import * # noqa: F403
if oq_flag:
# import FetchOpenQuake
- from FetchOpenQuake import *
+ from FetchOpenQuake import * # noqa: F403
# Initial process list
import psutil
@@ -598,7 +598,7 @@ def hazard_job(hazard_info):
elif args.job_type == 'Site':
site_job(hazard_info)
else:
- print('HazardSimulation: --job_type = Hazard or Site (please check).')
+ print('HazardSimulation: --job_type = Hazard or Site (please check).') # noqa: T201
# Closing the current process
sys.exit(0)
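
Most `F403`/`F405` suppressions in this script trace back to its late `from ... import *` block, which Ruff cannot resolve. If those suppressions are ever retired, the conventional fix is explicit imports of the names the script actually calls. The module attributions below are a best guess from this diff, not verified against the package:

from ComputeIntensityMeasure import compute_im, export_im, simulate_ground_motion
from CreateScenario import create_earthquake_scenarios, create_wind_scenarios, load_earthquake_scenarios
from CreateStation import create_stations
from HazardOccurrence import (
    configure_hazard_occurrence,
    export_sampled_earthquakes,
    get_im_exceedance_probability_gm,
    get_im_exceedance_probility,  # sic: the name is misspelled at its definition
    sample_earthquake_occurrence,
)
from SelectGroundMotion import output_all_ground_motion_info, select_ground_motion
# ...plus the FetchOpenQuake names (openquake_config, oq_run_classical_psha, ...) when oq_flag is set
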
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
index ad2710297..7d86fccae 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/HazardSimulationEQ.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -53,54 +53,54 @@
R2D = True
-def hazard_job(hazard_info):
+def hazard_job(hazard_info): # noqa: C901, D103, PLR0915
from CreateScenario import load_ruptures_openquake
from GMSimulators import simulate_ground_motion
try:
# oq_flag = hazard_info['Scenario']['EqRupture']['Type'] in ['oqSourceXML']
oq_flag = 'OpenQuake' in hazard_info['Scenario']['EqRupture']['Type']
- except:
+ except: # noqa: E722
oq_flag = False
# Read Site .csv
site_file = hazard_info['Site']['siteFile']
try:
stations = pd.read_csv(site_file).to_dict(orient='records')
- print('HazardSimulation: stations loaded.')
- except:
- print(f'HazardSimulation: please check the station file {site_file}')
- exit()
+ print('HazardSimulation: stations loaded.') # noqa: T201
+ except: # noqa: E722
+ print(f'HazardSimulation: please check the station file {site_file}') # noqa: T201
+ exit() # noqa: PLR1722
# print(stations)
# Scenarios
- print('HazardSimulation: loading scenarios.')
+ print('HazardSimulation: loading scenarios.') # noqa: T201
scenario_info = hazard_info['Scenario']
if scenario_info['Type'] == 'Earthquake':
# KZ-10/31/2022: checking user-provided scenarios
if scenario_info['EqRupture']['Type'] == 'oqSourceXML':
# The rupture file alone is not enough for the OpenQuake ERF, so the ruptures need to be recalculated
- rupFile = scenario_info['sourceFile']
+ rupFile = scenario_info['sourceFile'] # noqa: N806
scenarios = load_ruptures_openquake(
scenario_info, stations, work_dir, site_file, rupFile
)
else:
- rupFile = scenario_info['sourceFile']
- scenarios = load_earthquake_rupFile(scenario_info, rupFile)
+ rupFile = scenario_info['sourceFile'] # noqa: N806
+ scenarios = load_earthquake_rupFile(scenario_info, rupFile) # noqa: F405
else:
- # TODO: extending this to other hazards
- print('HazardSimulation: currently only supports EQ and Wind simulations.')
+ # TODO: extend this to other hazards # noqa: TD002
+ print('HazardSimulation: currently only supports EQ and Wind simulations.') # noqa: T201
# print(scenarios)
- print('HazardSimulation: scenarios loaded.')
- selected_scen_ids = sorted(list(scenarios.keys()))
+ print('HazardSimulation: scenarios loaded.') # noqa: T201
+ selected_scen_ids = sorted(list(scenarios.keys())) # noqa: C414
# Computing intensity measures
- print('HazardSimulation: computing intensity measures.')
+ print('HazardSimulation: computing intensity measures.') # noqa: T201
if scenario_info['Type'] == 'Earthquake':
# Computing uncorrelated Sa
event_info = hazard_info['Event']
# When a vector IM is used, PGA/SA needs to be computed before PGV
im_info = event_info['IntensityMeasure']
- if im_info['Type'] == 'Vector' and 'PGV' in im_info.keys():
- PGV_info = im_info.pop('PGV')
+ if im_info['Type'] == 'Vector' and 'PGV' in im_info.keys(): # noqa: SIM118
+ PGV_info = im_info.pop('PGV') # noqa: N806
im_info.update({'PGV': PGV_info})
event_info['IntensityMeasure'] = im_info
@@ -108,7 +108,7 @@ def hazard_job(hazard_info):
opensha_flag
or hazard_info['Scenario']['EqRupture']['Type'] == 'oqSourceXML'
):
- im_raw_path, im_list = compute_im(
+ im_raw_path, im_list = compute_im( # noqa: F405
scenarios,
stations,
scenario_info,
@@ -122,7 +122,7 @@ def hazard_job(hazard_info):
event_info['IntensityMeasure'] = im_info
elif oq_flag:
# Preparing config ini for OpenQuake
- filePath_ini, oq_ver_loaded, event_info = openquake_config(
+ filePath_ini, oq_ver_loaded, event_info = openquake_config( # noqa: N806, F405
hazard_info['Site'],
scenario_info,
event_info,
@@ -140,11 +140,11 @@ def hazard_job(hazard_info):
]:
# Calling openquake to run classical PSHA
# oq_version = scenario_info['EqRupture'].get('OQVersion',default_oq_version)
- oq_run_flag = oq_run_classical_psha(
+ oq_run_flag = oq_run_classical_psha( # noqa: F405
filePath_ini,
exports='csv',
oq_version=oq_ver_loaded,
- dir_info=dir_info,
+ dir_info=dir_info, # noqa: F405
)
if oq_run_flag:
err_msg = 'HazardSimulation: OpenQuake Classical PSHA failed.'
@@ -152,18 +152,18 @@ def hazard_job(hazard_info):
err_msg = (
err_msg
+ ' Please check whether leaked Python threads in the background are still occupying {}.'.format(
- os.path.expanduser('~/oqdata/db.sqlite3')
+ os.path.expanduser('~/oqdata/db.sqlite3') # noqa: PTH111
)
)
- print(err_msg)
+ print(err_msg) # noqa: T201
sys.exit(err_msg)
else:
- print('HazardSimulation: OpenQuake Classical PSHA completed.')
+ print('HazardSimulation: OpenQuake Classical PSHA completed.') # noqa: T201
if scenario_info['EqRupture'].get('UHS', False):
- ln_im_mr, mag_maf, im_list = oq_read_uhs_classical_psha(
+ ln_im_mr, mag_maf, im_list = oq_read_uhs_classical_psha( # noqa: F405
scenario_info,
event_info,
- dir_info,
+ dir_info, # noqa: F405
)
else:
ln_im_mr = []
@@ -173,16 +173,16 @@ def hazard_job(hazard_info):
elif scenario_info['EqRupture']['Type'] == 'oqSourceXML':
# Creating and conducting OpenQuake calculations
- oq_calc = OpenQuakeHazardCalc(
+ oq_calc = OpenQuakeHazardCalc( # noqa: F405
filePath_ini,
event_info,
oq_ver_loaded,
dir_info=hazard_info['Directory'],
)
oq_calc.run_calc()
- im_raw = [oq_calc.eval_calc()]
+ im_raw = [oq_calc.eval_calc()] # noqa: F841
# stn_new = stations['Stations']
- print('HazardSimulation: OpenQuake Scenario calculation completed.')
+ print('HazardSimulation: OpenQuake Scenario calculation completed.') # noqa: T201
else:
sys.exit(
@@ -201,7 +201,7 @@ def hazard_job(hazard_info):
reweight_only = occurrence_info.get('ReweightOnly', False)
# KZ-10/31/22: adding a flag for whether to re-sample ground motion maps or just monte-carlo
sampling_gmms = occurrence_info.get('SamplingGMMs', True)
- occ_dict = configure_hazard_occurrence(
+ occ_dict = configure_hazard_occurrence( # noqa: F405
input_dir,
output_dir,
im_raw_path,
@@ -219,7 +219,7 @@ def hazard_job(hazard_info):
period = occ_dict.get('Period')
hc_curves = occ_dict.get('HazardCurves')
# get im exceedance probabilities
- im_exceedance_prob = get_im_exceedance_probility(
+ im_exceedance_prob = get_im_exceedance_probility( # noqa: F405
im_raw_path, im_list, im_type, period, hc_curves, selected_scen_ids
)
# sample the earthquake scenario occurrence
@@ -230,7 +230,7 @@ def hazard_job(hazard_info):
occurrence_rate_origin = [
scenarios[i].get('MeanAnnualRate') for i in selected_scen_ids
]
- occurrence_model = sample_earthquake_occurrence(
+ occurrence_model = sample_earthquake_occurrence( # noqa: F405
model_type,
num_target_eqs,
return_periods,
@@ -240,31 +240,31 @@ def hazard_job(hazard_info):
occurrence_info,
)
# print(occurrence_model)
- P, Z = occurrence_model.get_selected_earthquake()
+ P, Z = occurrence_model.get_selected_earthquake() # noqa: N806
# now update the im_raw with selected eqs with Z > 0
id_selected_eqs = []
for i in range(len(Z)):
if P[i] > 0:
- id_selected_eqs.append(selected_scen_ids[i])
+ id_selected_eqs.append(selected_scen_ids[i]) # noqa: PERF401
selected_scen_ids = id_selected_eqs
num_per_eq_avg = int(np.ceil(num_target_gmms / len(selected_scen_ids)))
# compute error from optimization residual
error = occurrence_model.get_error_vector()
# export sampled earthquakes
- _ = export_sampled_earthquakes(
+ _ = export_sampled_earthquakes( # noqa: F405
error, selected_scen_ids, scenarios, P, output_dir
)
# Updating station information
# stations['Stations'] = stn_new
- print('HazardSimulation: uncorrelated response spectra computed.')
+ print('HazardSimulation: uncorrelated response spectra computed.') # noqa: T201
# print(im_raw)
# KZ-08/23/22: adding method to do hazard occurrence model
if occurrence_sampling and sampling_gmms:
num_gm_per_site = num_per_eq_avg
else:
num_gm_per_site = event_info['NumberPerSite']
- print('num_gm_per_site = ', num_gm_per_site)
+ print('num_gm_per_site = ', num_gm_per_site) # noqa: T201
if scenario_info['EqRupture']['Type'] not in [
'OpenQuakeClassicalPSHA',
'OpenQuakeUserConfig',
@@ -281,13 +281,13 @@ def hazard_job(hazard_info):
event_info['IntensityMeasure'],
selected_scen_ids,
)
- print('HazardSimulation: correlated response spectra computed.')
+ print('HazardSimulation: correlated response spectra computed.') # noqa: T201
# KZ-08/23/22: adding method to do hazard occurrence model
if occurrence_sampling and sampling_gmms:
# get im exceedance probabilities for individual ground motions
# print('im_list = ',im_list)
im_exceedance_prob_gmm, occur_rate_origin = (
- get_im_exceedance_probability_gm(
+ get_im_exceedance_probability_gm( # noqa: F405
np.exp(ln_im_mr),
im_list,
im_type,
@@ -301,7 +301,7 @@ def hazard_job(hazard_info):
# occurrence_rate_origin = [scenarios[i].get('MeanAnnualRate') for i in range(len(scenarios))]
# else:
# occurrence_rate_origin = None
- occurrence_model_gmm = sample_earthquake_occurrence(
+ occurrence_model_gmm = sample_earthquake_occurrence( # noqa: F405
model_type,
num_target_gmms,
return_periods,
@@ -311,12 +311,12 @@ def hazard_job(hazard_info):
occurrence_info,
)
# print(occurrence_model)
- P_gmm, Z_gmm = occurrence_model_gmm.get_selected_earthquake()
+ P_gmm, Z_gmm = occurrence_model_gmm.get_selected_earthquake() # noqa: N806
# now update the im_raw with selected eqs with Z > 0
id_selected_gmms = []
for i in range(len(Z_gmm)):
if P_gmm[i] > 0:
- id_selected_gmms.append(i)
+ id_selected_gmms.append(i) # noqa: PERF401
id_selected_scens = np.array(
[
selected_scen_ids[int(x / num_gm_per_site)]
@@ -331,13 +331,13 @@ def hazard_job(hazard_info):
id_selected_gmms, id_selected_scens, P_gmm, output_dir
)
- selected_scen_ids_step2 = sorted(list(set(id_selected_scens)))
+ selected_scen_ids_step2 = sorted(list(set(id_selected_scens))) # noqa: C414
sampled_ln_im_mr = [None] * len(selected_scen_ids_step2)
sampled_mag_maf = [None] * len(selected_scen_ids_step2)
for i, selected_scen in enumerate(selected_scen_ids_step2):
scen_ind = selected_scen_ids.index(selected_scen)
- selected_simus_in_scen_i = sorted(
+ selected_simus_in_scen_i = sorted( # noqa: C414
list(set(id_selected_simus[id_selected_scens == selected_scen]))
)
sampled_ln_im_mr[i] = ln_im_mr[scen_ind][
@@ -357,19 +357,19 @@ def hazard_job(hazard_info):
# print(np.exp(ln_im_mr[0][0, :, 1]))
# print(np.exp(ln_im_mr[0][1, :, 1]))
else:
- # TODO: extending this to other hazards
- print('HazardSimulation currently only supports earthquake simulations.')
- print('HazardSimulation: intensity measures computed.')
+ # TODO: extend this to other hazards # noqa: TD002
+ print('HazardSimulation: currently only supports earthquake simulations.') # noqa: T201
+ print('HazardSimulation: intensity measures computed.') # noqa: T201
# Selecting ground motion records
if scenario_info['Type'] == 'Earthquake':
# Selecting records
data_source = event_info.get('Database', 0)
if data_source:
- print('HazardSimulation: selecting ground motion records.')
+ print('HazardSimulation: selecting ground motion records.') # noqa: T201
sf_max = event_info['ScalingFactor']['Maximum']
sf_min = event_info['ScalingFactor']['Minimum']
start_time = time.time()
- gm_id, gm_file = select_ground_motion(
+ gm_id, gm_file = select_ground_motion( # noqa: F405
im_list,
ln_im_mr,
data_source,
@@ -380,17 +380,17 @@ def hazard_job(hazard_info):
stations,
selected_scen_ids,
)
- print(
+ print( # noqa: T201
f'HazardSimulation: ground motion records selected ({time.time() - start_time} s).'
)
# print(gm_id)
gm_id = [int(i) for i in np.unique(gm_id)]
- gm_file = [i for i in np.unique(gm_file)]
- runtag = output_all_ground_motion_info(
+ gm_file = [i for i in np.unique(gm_file)] # noqa: C416
+ runtag = output_all_ground_motion_info( # noqa: F405
gm_id, gm_file, output_dir, 'RecordsList.csv'
)
if runtag:
- print('HazardSimulation: the ground motion list saved.')
+ print('HazardSimulation: ground motion list saved.') # noqa: T201
else:
sys.exit(
'HazardSimulation: warning - issues with saving the ground motion list.'
@@ -399,31 +399,31 @@ def hazard_job(hazard_info):
user_name = event_info.get('UserName', None)
user_password = event_info.get('UserPassword', None)
if (user_name is not None) and (user_password is not None) and (not R2D):
- print('HazardSimulation: downloading ground motion records.')
- raw_dir = download_ground_motion(
+ print('HazardSimulation: downloading ground motion records.') # noqa: T201
+ raw_dir = download_ground_motion( # noqa: F405
gm_id, user_name, user_password, output_dir
)
if raw_dir:
- print('HazardSimulation: ground motion records downloaded.')
+ print('HazardSimulation: ground motion records downloaded.') # noqa: T201
# Parsing records
- print('HazardSimulation: parsing records.')
- record_dir = parse_record(
+ print('HazardSimulation: parsing records.') # noqa: T201
+ record_dir = parse_record( # noqa: F405, F841
gm_file,
raw_dir,
output_dir,
event_info['Database'],
event_info['OutputFormat'],
)
- print('HazardSimulation: records parsed.')
+ print('HazardSimulation: records parsed.') # noqa: T201
else:
- print('HazardSimulation: No records to be parsed.')
+ print('HazardSimulation: No records to be parsed.') # noqa: T201
else:
- print('HazardSimulation: ground motion selection is not requested.')
+ print('HazardSimulation: ground motion selection is not requested.') # noqa: T201
gf_im_list = []
- if 'GroundFailure' in hazard_info['Event'].keys():
+ if 'GroundFailure' in hazard_info['Event'].keys(): # noqa: SIM118
ground_failure_info = hazard_info['Event']['GroundFailure']
- if 'Liquefaction' in ground_failure_info.keys():
+ if 'Liquefaction' in ground_failure_info.keys(): # noqa: SIM118
import liquefaction
trigging_info = ground_failure_info['Liquefaction']['Triggering']
@@ -445,7 +445,7 @@ def hazard_job(hazard_info):
)
del trigging_model
gf_im_list += trigging_info['Output']
- if 'LateralSpreading' in ground_failure_info['Liquefaction'].keys():
+ if 'LateralSpreading' in ground_failure_info['Liquefaction'].keys(): # noqa: SIM118
lat_spread_info = ground_failure_info['Liquefaction'][
'LateralSpreading'
]
@@ -463,7 +463,7 @@ def hazard_job(hazard_info):
ln_im_mr, mag_maf, im_list
)
gf_im_list += lat_spread_info['Output']
- if 'Settlement' in ground_failure_info['Liquefaction'].keys():
+ if 'Settlement' in ground_failure_info['Liquefaction'].keys(): # noqa: SIM118
settlement_info = ground_failure_info['Liquefaction']['Settlement']
settlement_model = getattr(liquefaction, settlement_info['Model'])()
ln_im_mr, mag_maf, im_list = settlement_model.run(
@@ -472,8 +472,8 @@ def hazard_job(hazard_info):
gf_im_list += settlement_info['Output']
if event_info['SaveIM'] and ln_im_mr:
- print('HazardSimulation: saving simulated intensity measures.')
- _ = export_im(
+ print('HazardSimulation: saving simulated intensity measures.') # noqa: T201
+ _ = export_im( # noqa: F405
stations,
im_list,
ln_im_mr,
@@ -484,9 +484,9 @@ def hazard_job(hazard_info):
gf_im_list,
selected_scen_ids,
)
- print('HazardSimulation: simulated intensity measures saved.')
+ print('HazardSimulation: simulated intensity measures saved.') # noqa: T201
else:
- print('HazardSimulation: IM is not required to saved or no IM is found.')
+ print('HazardSimulation: IM is not required to be saved or no IM is found.') # noqa: T201
# If the hazard downsampling algorithm is used, save the errors.
@@ -498,17 +498,17 @@ def hazard_job(hazard_info):
args = parser.parse_args()
# read the hazard configuration file
- with open(args.hazard_config) as f:
+ with open(args.hazard_config) as f: # noqa: PTH123
hazard_info = json.load(f)
# directory (back compatibility here)
work_dir = hazard_info['Directory']
- input_dir = os.path.join(work_dir, 'Input')
- output_dir = os.path.join(work_dir, 'Output')
+ input_dir = os.path.join(work_dir, 'Input') # noqa: PTH118
+ output_dir = os.path.join(work_dir, 'Output') # noqa: PTH118
try:
- os.mkdir(f'{output_dir}')
- except:
- print('HazardSimulation: output folder already exists.')
+ os.mkdir(f'{output_dir}') # noqa: PTH102
+ except: # noqa: E722
+ print('HazardSimulation: output folder already exists.') # noqa: T201
# parse job type for set up environment and constants
try:
@@ -516,11 +516,11 @@ def hazard_job(hazard_info):
'PointSource',
'ERF',
]
- except:
+ except: # noqa: E722
opensha_flag = False
try:
oq_flag = hazard_info['Scenario']['EqRupture']['Type'] == 'oqSourceXML'
- except:
+ except: # noqa: E722
oq_flag = False
# dependencies
@@ -530,59 +530,59 @@ def hazard_job(hazard_info):
packages = ['selenium', 'tqdm', 'psutil', 'PuLP', 'requests']
for p in packages:
if importlib.util.find_spec(p) is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p]) # noqa: S603
# set up environment
import socket
if 'stampede2' not in socket.gethostname():
if importlib.util.find_spec('jpype') is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1'])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603
import jpype
- from jpype.types import *
+ from jpype.types import * # noqa: F403
memory_total = psutil.virtual_memory().total / (1024.0**3)
memory_request = int(memory_total * 0.75)
jpype.addClassPath('./lib/OpenSHA-1.5.2.jar')
try:
jpype.startJVM(f'-Xmx{memory_request}G', convertStrings=False)
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
f'StartJVM of ./lib/OpenSHA-1.5.2.jar with {memory_request} GB memory failed. Try again after releasing some memory.'
)
if oq_flag:
# clear up old db.sqlite3 if any
- if os.path.isfile(os.path.expanduser('~/oqdata/db.sqlite3')):
+ if os.path.isfile(os.path.expanduser('~/oqdata/db.sqlite3')): # noqa: PTH111, PTH113
new_db_sqlite3 = True
try:
- os.remove(os.path.expanduser('~/oqdata/db.sqlite3'))
- except:
+ os.remove(os.path.expanduser('~/oqdata/db.sqlite3')) # noqa: PTH107, PTH111
+ except: # noqa: E722
new_db_sqlite3 = False
# data dir
- os.environ['OQ_DATADIR'] = os.path.join(
- os.path.abspath(output_dir),
+ os.environ['OQ_DATADIR'] = os.path.join( # noqa: PTH118
+ os.path.abspath(output_dir), # noqa: PTH100
'oqdata',
)
- print('HazardSimulation: local OQ_DATADIR = ' + os.environ.get('OQ_DATADIR'))
- if os.path.exists(os.environ.get('OQ_DATADIR')):
- print(
+ print('HazardSimulation: local OQ_DATADIR = ' + os.environ.get('OQ_DATADIR')) # noqa: T201
+ if os.path.exists(os.environ.get('OQ_DATADIR')): # noqa: PTH110
+ print( # noqa: T201
'HazardSimulation: local OQ folder already exists, overwriting it now...'
)
shutil.rmtree(os.environ.get('OQ_DATADIR'))
- os.makedirs(f"{os.environ.get('OQ_DATADIR')}")
+ os.makedirs(f"{os.environ.get('OQ_DATADIR')}") # noqa: PTH103
# import modules
- from ComputeIntensityMeasure import *
- from CreateScenario import *
- from CreateStation import *
+ from ComputeIntensityMeasure import * # noqa: F403
+ from CreateScenario import * # noqa: F403
+ from CreateStation import * # noqa: F403
# KZ-08/23/22: adding hazard occurrence model
- from HazardOccurrence import *
- from SelectGroundMotion import *
+ from HazardOccurrence import * # noqa: F403
+ from SelectGroundMotion import * # noqa: F403
if oq_flag:
# import FetchOpenQuake
- from FetchOpenQuake import *
+ from FetchOpenQuake import * # noqa: F403
# untar site databases
# site_database = ['global_vs30_4km.tar.gz','global_zTR_4km.tar.gz','thompson_vs30_4km.tar.gz']
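
The directory setup near the top of this script's `__main__` block collects four suppressions (`PTH118`, `PTH102`, `E722`, `T201`) that a single `pathlib` idiom would remove. A sketch under the same `work_dir` layout, with `hazard_info` already loaded from the config file:

from pathlib import Path

work_dir = Path(hazard_info['Directory'])
input_dir = work_dir / 'Input'  # replaces os.path.join (PTH118)
output_dir = work_dir / 'Output'

# exist_ok=True replaces the try / bare-except around os.mkdir (PTH102, E722)
output_dir.mkdir(parents=True, exist_ok=True)
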
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py
index 432a5e2b3..5083e18fc 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/ScenarioForecast.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -54,17 +54,17 @@
args = parser.parse_args()
# read the hazard configuration file
- with open(args.hazard_config) as f:
+ with open(args.hazard_config) as f: # noqa: PTH123
hazard_info = json.load(f)
# directory (back compatibility here)
work_dir = hazard_info['Directory']
- input_dir = os.path.join(work_dir, 'Input')
- output_dir = os.path.join(work_dir, 'Output')
+ input_dir = os.path.join(work_dir, 'Input') # noqa: PTH118
+ output_dir = os.path.join(work_dir, 'Output') # noqa: PTH118
try:
- os.mkdir(f'{output_dir}')
- except:
- print('HazardSimulation: output folder already exists.')
+ os.mkdir(f'{output_dir}') # noqa: PTH102
+ except: # noqa: E722
+ print('HazardSimulation: output folder already exists.') # noqa: T201
# parse job type for set up environment and constants
try:
@@ -72,14 +72,14 @@
'PointSource',
'ERF',
]
- except:
+ except: # noqa: E722
opensha_flag = False
try:
oq_flag = (
'OpenQuake' in hazard_info['Scenario']['EqRupture']['Type']
or 'oqSourceXML' in hazard_info['Scenario']['EqRupture']['Type']
)
- except:
+ except: # noqa: E722
oq_flag = False
# dependencies
@@ -90,16 +90,16 @@
# Please install it by running
# "{sys.executable} -m pip install -q {p}"
# in your terminal or command prompt""")
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', p]) # noqa: S603
# set up environment
import socket
if 'stampede2' not in socket.gethostname():
if importlib.util.find_spec('jpype') is None:
- subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1'])
+ subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'JPype1']) # noqa: S603
import jpype
- from jpype.types import *
+ from jpype.types import * # noqa: F403
memory_total = psutil.virtual_memory().total / (1024.0**3)
memory_request = int(memory_total * 0.75)
@@ -129,7 +129,7 @@
if oq_flag:
# import FetchOpenQuake
- from FetchOpenQuake import *
+ from FetchOpenQuake import * # noqa: F403
# untar site databases
site_database = [
@@ -137,12 +137,12 @@
'global_zTR_4km.tar.gz',
'thompson_vs30_4km.tar.gz',
]
- print('HazardSimulation: Extracting site databases.')
- cwd = os.path.dirname(os.path.realpath(__file__))
+ print('HazardSimulation: Extracting site databases.') # noqa: T201
+ cwd = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
for cur_database in site_database:
# subprocess.run(["tar","-xvzf",cwd+"/database/site/"+cur_database,"-C",cwd+"/database/site/"])
tar = tarfile.open(cwd + '/database/site/' + cur_database, 'r:gz')
- tar.extractall(cwd + '/database/site/')
+ tar.extractall(cwd + '/database/site/') # noqa: S202
tar.close()
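
The `S202` suppression above flags `tar.extractall` because a crafted archive can write outside the destination directory ("tar slip"). The site databases ship with the repository, so the suppression is defensible; for untrusted archives a hardened sketch looks like this (Python 3.9+ for `is_relative_to`; interpreters with extraction filters can pass `filter='data'` to `extractall` instead):

import tarfile
from pathlib import Path

def safe_extract(archive, dest):
    """Extract a .tar.gz while refusing members that would escape dest."""
    dest_path = Path(dest).resolve()
    with tarfile.open(archive, 'r:gz') as tar:
        for member in tar.getmembers():
            target = (dest_path / member.name).resolve()
            if not target.is_relative_to(dest_path):
                raise RuntimeError(f'unsafe member path: {member.name}')
        tar.extractall(dest_path)
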
# # Initial process list
@@ -150,7 +150,7 @@
# proc_list_init = [p.info for p in psutil.process_iter(attrs=['pid', 'name']) if 'python' in p.info['name']]
# Sites and stations
- print('HazardSimulation: creating stations.')
+ print('HazardSimulation: creating stations.') # noqa: T201
site_info = hazard_info['Site']
z1_tag = 0
z25_tag = 0
@@ -164,11 +164,11 @@
site_info['Z1pt0'].update({'z1_tag': z1_tag})
site_info['Z2pt5'].update({'z25_tag': z25_tag})
if site_info['Type'] == 'From_CSV':
- input_file = os.path.join(input_dir, site_info['input_file'])
+ input_file = os.path.join(input_dir, site_info['input_file']) # noqa: PTH118
output_file = site_info.get('output_file', False)
if output_file:
- output_file = os.path.join(input_dir, output_file)
- filter = site_info['filter']
+ output_file = os.path.join(input_dir, output_file) # noqa: PTH118
+ filter = site_info['filter'] # noqa: A001
# Creating stations from the csv input file
stations = create_stations(
input_file,
@@ -179,17 +179,17 @@
site_info['Z2pt5'],
)
else:
- print("""Only From_CSV site_info['Type'] is supported now""")
+ print("""Only From_CSV site_info['Type'] is supported now""") # noqa: T201
if stations:
- print('ScenarioForecast: stations created.')
+ print('ScenarioForecast: stations created.') # noqa: T201
else:
- print(
+ print( # noqa: T201
'HazardSimulation: please check the "Input" directory in the configuration json file.'
)
- exit()
+ exit() # noqa: PLR1722
# Scenarios
- print('HazardSimulation: creating scenarios.')
+ print('HazardSimulation: creating scenarios.') # noqa: T201
scenario_info = hazard_info['Scenario']
if scenario_info['Type'] == 'Earthquake':
# KZ-10/31/2022: checking user-provided scenarios
@@ -211,10 +211,10 @@
# Creating wind scenarios
create_wind_scenarios(scenario_info, stations, input_dir)
else:
- # TODO: extending this to other hazards
- print('HazardSimulation: currently only supports EQ and Wind simulations.')
+ # TODO: extending this to other hazards # noqa: TD002
+ print('HazardSimulation: currently only supports EQ and Wind simulations.') # noqa: T201
# print(scenarios)
- print('HazardSimulation: scenarios created.')
+ print('HazardSimulation: scenarios created.') # noqa: T201
# Closing the current process
sys.exit(0)
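The T201 suppressions in this file mark `print` used as ad-hoc logging. A minimal sketch of the alternative the rule points toward, assuming no project-wide logging helper: a module-level logger keeps the same messages but gains levels and timestamps.

import logging

logging.basicConfig(format='%(asctime)s %(name)s: %(message)s', level=logging.INFO)
logger = logging.getLogger('HazardSimulation')

logger.info('creating stations.')  # instead of print('HazardSimulation: creating stations.')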
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/SelectGroundMotion.py b/modules/performRegionalEventSimulation/regionalGroundMotion/SelectGroundMotion.py
index 5187e4b09..884a2d1cc 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/SelectGroundMotion.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/SelectGroundMotion.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -44,17 +44,17 @@
R2D = True
if not R2D:
pass
-import copy
-import csv
+import copy # noqa: E402
+import csv # noqa: E402
-import numpy as np
-import pandas as pd
+import numpy as np # noqa: E402
+import pandas as pd # noqa: E402
-class GM_Selector:
+class GM_Selector: # noqa: D101
def __init__(
self,
- gmdb_im_df=dict(),
+ gmdb_im_df=dict(), # noqa: B006, C408
num_records=1,
sf_min=None,
sf_max=None,
@@ -65,7 +65,7 @@ def __init__(
self.set_sf_range(sf_min, sf_max)
self.set_target_im(target_im)
- def set_gmdb_im_df(self, gmdb_im_df):
+ def set_gmdb_im_df(self, gmdb_im_df): # noqa: D102
self.gmdb_im_df = gmdb_im_df
self.num_gm = len(gmdb_im_df['RSN'])
tmp_list = list(gmdb_im_df.keys())
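B006 above flags the mutable default `gmdb_im_df=dict()`: the dict is created once at function-definition time and shared by every call that omits the argument. A sketch of the `None`-sentinel idiom, other parameters elided:

class GM_Selector:
    def __init__(self, gmdb_im_df=None, num_records=1):
        if gmdb_im_df is None:
            gmdb_im_df = {}  # fresh dict per call, never shared
        self.set_gmdb_im_df(gmdb_im_df)
        self.set_num_records(num_records)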
@@ -79,10 +79,10 @@ def set_gmdb_im_df(self, gmdb_im_df):
tmp_scalable.append(1)
self.scalable = tmp_scalable
- def set_num_records(self, num_records):
+ def set_num_records(self, num_records): # noqa: D102
self.num_records = num_records
- def set_sf_range(self, sf_min, sf_max):
+ def set_sf_range(self, sf_min, sf_max): # noqa: D102
if sf_min is None:
self.sf_min = 0.0001
else:
@@ -93,10 +93,10 @@ def set_sf_range(self, sf_min, sf_max):
self.sf_max = sf_max
self.sf_range = np.linspace(self.sf_min, self.sf_max, 100)
- def set_target_im(self, target_im):
+ def set_target_im(self, target_im): # noqa: D102
self.target_im = [target_im for k in range(self.num_gm)]
- def select_records(self):
+ def select_records(self): # noqa: D102
im_table = self.gmdb_im_df.iloc[:, 1:]
min_err = 1000000.0
for s in self.sf_range:
@@ -114,11 +114,11 @@ def select_records(self):
self.loc_tag = tmp_tag
self.min_err = min_err
- self.rsn_tag = self.gmdb_im_df['RSN'].values.tolist()[tmp_tag]
+ self.rsn_tag = self.gmdb_im_df['RSN'].values.tolist()[tmp_tag] # noqa: PD011
self.sf = sf
-def select_ground_motion(
+def select_ground_motion( # noqa: C901, D103
im_list,
target_ln_im,
gmdb_file,
@@ -131,7 +131,7 @@ def select_ground_motion(
):
# Loading gmdb
if gmdb_file == 'PEER NGA West 2':
- cwd = os.path.dirname(os.path.realpath(__file__))
+ cwd = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120
gmdb = pd.read_csv(
cwd + '/database/gmdb/NGAWest2.csv',
header=0,
@@ -141,24 +141,24 @@ def select_ground_motion(
# Parsing spectral data
num_gm = len(gmdb['RecId'])
tmp = gmdb.keys()[37:147]
- T_db = [float(a.replace('T', '').replace('S', '')) for a in tmp]
+ T_db = [float(a.replace('T', '').replace('S', '')) for a in tmp] # noqa: N806
psa_db = gmdb.iloc[:, 37:147]
- pga = gmdb.iloc[:, 34]
- pgv = gmdb.iloc[:, 35]
- pgd = gmdb.iloc[:, 36]
+ pga = gmdb.iloc[:, 34] # noqa: F841
+ pgv = gmdb.iloc[:, 35] # noqa: F841
+ pgd = gmdb.iloc[:, 36] # noqa: F841
# Scaling factors
- sf_range = np.linspace(sf_min, sf_max, 100)
+ sf_range = np.linspace(sf_min, sf_max, 100) # noqa: F841
# Selected ground motion ID
gm_id = []
sf_data = []
filename = []
# get available key names
# Parse im_list
- target_period = []
+ target_period = [] # noqa: F841
im_map = {'PGA': 34, 'PGV': 35, 'PGD': 36, 'DS575H': 151, 'DS595H': 152}
im_loc_tag = []
- gmdb_im_dict = dict()
- gmdb_im_dict.update({'RSN': gmdb['RecId'].values.tolist()})
+ gmdb_im_dict = dict() # noqa: C408
+ gmdb_im_dict.update({'RSN': gmdb['RecId'].values.tolist()}) # noqa: PD011
for cur_im in im_list:
if cur_im.startswith('SA'):
cur_period = float(cur_im[3:-1])
@@ -176,7 +176,7 @@ def select_ground_motion(
{
cur_im: [
x[0]
- for x in gmdb.iloc[:, im_loc_tag].values.tolist()
+ for x in gmdb.iloc[:, im_loc_tag].values.tolist() # noqa: PD011
]
}
)
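PD011 flags `.values` because its return type (ndarray or ExtensionArray) depends on the dtype. A sketch of the explicit spellings pandas documents for the same conversions:

import pandas as pd

rec_ids = pd.Series([101, 102, 103])
as_list = rec_ids.tolist()     # plain Python list, replaces .values.tolist()
as_array = rec_ids.to_numpy()  # explicit ndarray, replaces .values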
@@ -187,14 +187,14 @@ def select_ground_motion(
for cur_target in target_ln_im:
tmp_scen = eq_ids[count] + 1
count = count + 1
- print('-Scenario #' + str(tmp_scen))
+ print('-Scenario #' + str(tmp_scen)) # noqa: T201
num_stations, num_periods, num_simu = cur_target.shape
tmp_id = np.zeros((num_stations, num_simu))
tmp_sf = np.zeros((num_stations, num_simu))
tmp_min_err = np.zeros((num_stations, num_simu))
tmp_filename = []
for i in range(num_simu):
- print('--Realization #' + str(i + 1))
+ print('--Realization #' + str(i + 1)) # noqa: T201
for j in range(num_stations):
# create a ground motion selector
gm_selector = GM_Selector(
@@ -242,7 +242,7 @@ def select_ground_motion(
# vs30 = [stations[j]['vs30'] for j in range(len(stations))]
# DepthToRock is not used in NGA-West2 GMPEs and is not saved
# zTR = [stations[j]['DepthToRock'] for j in range(len(stations))]
- df = pd.DataFrame(
+ df = pd.DataFrame( # noqa: PD901
{
'GP_file': station_name,
'Longitude': lon,
@@ -252,44 +252,44 @@ def select_ground_motion(
# 'DepthToRock': zTR
}
)
- output_dir = os.path.join(
- os.path.dirname(Path(output_dir)),
- os.path.basename(Path(output_dir)),
+ output_dir = os.path.join( # noqa: PTH118
+ os.path.dirname(Path(output_dir)), # noqa: PTH120
+ os.path.basename(Path(output_dir)), # noqa: PTH119
)
- df.to_csv(os.path.join(output_dir, output_file), index=False)
+ df.to_csv(os.path.join(output_dir, output_file), index=False) # noqa: PTH118
for cur_scen in range(len(gm_id)):
if len(gm_id) > 1:
cur_scen_folder = 'scenario' + str(eq_ids[cur_scen] + 1)
try:
- os.mkdir(os.path.join(output_dir, cur_scen_folder))
- except:
- print('SelectGroundMotion: scenario folder already exists.')
- cur_output_dir = os.path.join(output_dir, cur_scen_folder)
+ os.mkdir(os.path.join(output_dir, cur_scen_folder)) # noqa: PTH102, PTH118
+ except: # noqa: E722
+ print('SelectGroundMotion: scenario folder already exists.') # noqa: T201
+ cur_output_dir = os.path.join(output_dir, cur_scen_folder) # noqa: PTH118
else:
cur_output_dir = output_dir
for i, site_id in enumerate(station_name):
gm_file = ['RSN' + str(int(j)) for j in gm_id[cur_scen][i]]
- factor = [j for j in sf_data[cur_scen][i]]
- df = pd.DataFrame({'TH_file': gm_file, 'factor': factor})
- df.to_csv(os.path.join(cur_output_dir, site_id), index=False)
+ factor = [j for j in sf_data[cur_scen][i]] # noqa: C416
+ df = pd.DataFrame({'TH_file': gm_file, 'factor': factor}) # noqa: PD901
+ df.to_csv(os.path.join(cur_output_dir, site_id), index=False) # noqa: PTH118
# return
return gm_id, filename
-def output_all_ground_motion_info(gm_id, gm_file, output_dir, filename):
+def output_all_ground_motion_info(gm_id, gm_file, output_dir, filename): # noqa: D103
# Writing all record names to a csv file
- print(gm_file)
+ print(gm_file) # noqa: T201
try:
- with open(os.path.join(output_dir, filename), 'w') as f:
+ with open(os.path.join(output_dir, filename), 'w') as f: # noqa: PTH118, PTH123
w = csv.writer(f)
if gm_file:
w.writerow(gm_file)
- with open(os.path.join(output_dir, 'RSN.csv'), 'w') as f:
+ with open(os.path.join(output_dir, 'RSN.csv'), 'w') as f: # noqa: PTH118, PTH123
w = csv.writer(f)
if gm_id:
w.writerow(gm_id)
- return 1
- except:
+ return 1 # noqa: TRY300
+ except: # noqa: E722
return 0
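The E722/TRY300 pair above marks a bare `except` around file output with the success `return` inside the `try`. A sketch of the narrower shape both rules point toward, catching only the `OSError` family that `open` can actually raise:

import csv
from pathlib import Path

def write_records(output_dir, filename, rows):
    try:
        with (Path(output_dir) / filename).open('w', newline='') as f:
            csv.writer(f).writerow(rows)
    except OSError:
        return 0
    else:
        return 1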
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/USGS_API.py b/modules/performRegionalEventSimulation/regionalGroundMotion/USGS_API.py
index 715a08c48..facf4e9be 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/USGS_API.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/USGS_API.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -44,7 +44,7 @@
import requests
-class USGS_HazardCurve:
+class USGS_HazardCurve: # noqa: D101
def __init__(
self,
longitude=None,
@@ -55,37 +55,37 @@ def __init__(
tag=None,
):
if self._load_config():
- print('USGS_HazardCurve.__init__: configuration loaded.')
+ print('USGS_HazardCurve.__init__: configuration loaded.') # noqa: T201
else:
- print('USGS_HazardCurve.__init__: error in loading configuration file.')
+ print('USGS_HazardCurve.__init__: error in loading configuration file.') # noqa: T201
return
if self._check_edition(edition):
self.edition = self._check_edition(edition)
else:
- print(
+ print( # noqa: T201
f'USGS_HazardCurve.__init__: edition {edition} is not supported by USGS.'
)
return
query_region = self._get_region(longitude, latitude)
if query_region is None:
- print(
+ print( # noqa: T201
f'USGS_HazardCurve.__init__: site (lon, lat) = ({longitude},{latitude}) is not supported.'
)
return
- else:
+ else: # noqa: RET505
self.longitude = longitude
self.latitude = latitude
self.region = query_region
- print(
+ print( # noqa: T201
f'USGS_HazardCurve.__init__: site (lon, lat) = ({longitude},{latitude}) is found in USGS region {self.region}.'
)
if self._check_region(self.region):
- print(f'USGS_HazardCurve.__init__: region {self.region} is set up.')
+ print(f'USGS_HazardCurve.__init__: region {self.region} is set up.') # noqa: T201
else:
- print(
+ print( # noqa: T201
f'USGS_HazardCurve.__init__: region {self.region} is not supported by edition {self.edition}.'
)
return
@@ -93,7 +93,7 @@ def __init__(
if self._check_vs30(vs30):
self.vs30 = self._check_vs30(vs30)
else:
- print(
+ print( # noqa: T201
f'USGS_HazardCurve.__init__: vs30 {vs30} is not supported by edition {self.edition} and region {self.region}.'
)
return
@@ -101,39 +101,39 @@ def __init__(
if self._check_imt(imt):
self.imt = imt
else:
- print(f'USGS_HazardCurve.__init__: imt {imt} is not supported.')
+ print(f'USGS_HazardCurve.__init__: imt {imt} is not supported.') # noqa: T201
return
self.tag = tag
# return
- print('USGS_HazardCurve.__init__: configuration done.')
+ print('USGS_HazardCurve.__init__: configuration done.') # noqa: T201
return
def _load_config(self):
- cur_path = os.path.dirname(os.path.abspath(__file__))
- config_file = os.path.join(cur_path, 'lib', 'USGS_HazardCurveConfig.json')
+ cur_path = os.path.dirname(os.path.abspath(__file__)) # noqa: PTH100, PTH120
+ config_file = os.path.join(cur_path, 'lib', 'USGS_HazardCurveConfig.json') # noqa: PTH118
try:
- with open(config_file) as f:
+ with open(config_file) as f: # noqa: PTH123
self.config = json.load(f)
- return True
- except:
+ return True # noqa: TRY300
+ except: # noqa: E722
self.config = {}
return False
- def _check_edition(self, edition, auto_correction=True):
+ def _check_edition(self, edition, auto_correction=True): # noqa: FBT002
# available editions
ed_list = self.config.get('parameters').get('edition').get('values')
self.avail_editions = [x.get('value') for x in ed_list]
- print(
+ print( # noqa: T201
f'USGS_HazardCurve._check_edition: available editions: {self.avail_editions}'
)
# check
if edition in self.avail_editions:
return edition
- elif auto_correction:
+ elif auto_correction: # noqa: RET505
edition = self.avail_editions[0]
- return edition
+ return edition # noqa: RET504
else:
return False
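RET505 above objects to `elif`/`else` branches that follow a `return`. A behavior-preserving sketch of `_check_edition` in the flattened shape the rule suggests, with the lookup and logging lines elided:

def _check_edition(self, edition, auto_correction=True):
    if edition in self.avail_editions:
        return edition
    if auto_correction:
        return self.avail_editions[0]
    return False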
@@ -167,9 +167,9 @@ def _check_region(self, region):
)
# check
- if region in self.avail_regions:
+ if region in self.avail_regions: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
def _check_vs30(self, vs30):
@@ -207,17 +207,17 @@ def _check_imt(self, imt):
for x in imt_available
if x.startswith('SA')
]
- print('Periods available = ', period_available)
+ print('Periods available = ', period_available) # noqa: T201
if imt in imt_available:
self.imt_list = [imt]
return True
- else:
+ else: # noqa: RET505
cur_period = float(imt.replace('P', '.')[2:])
if cur_period < np.min(period_available) or cur_period > np.max(
period_available
):
return False
- else:
+ else: # noqa: RET505
# interpolate periods
self.period_list = []
for i, p in enumerate(period_available):
@@ -231,33 +231,33 @@ def _check_imt(self, imt):
# print('self.imt_list = ',self.imt_list)
return True
- def fetch_url(self):
+ def fetch_url(self): # noqa: D102
self.res_json = []
for cur_imt in self.imt_list:
# set url
usgs_url = f'https://earthquake.usgs.gov/nshmp-haz-ws/hazard/{self.edition}/{self.region}/{self.longitude}/{self.latitude}/{cur_imt}/{self.vs30}'
- print(f'USGS_HazardCurve.fetch_url: {usgs_url}.\n')
+ print(f'USGS_HazardCurve.fetch_url: {usgs_url}.\n') # noqa: T201
# request
- res = requests.get(usgs_url)
- if res.status_code == 200:
+ res = requests.get(usgs_url) # noqa: S113
+ if res.status_code == 200: # noqa: PLR2004
self.res_json.append(res.json())
# print('USGS_HazardCurve.fetch_url: {}'.format(self.res_json))
else:
# try 10 more times to overcome the api traffic issue
- for i in range(10):
- res = requests.get(usgs_url)
- if res.status_code == 200:
+ for i in range(10): # noqa: B007
+ res = requests.get(usgs_url) # noqa: S113
+ if res.status_code == 200: # noqa: PLR2004
self.res_json.append(res.json())
return True
self.res_json.append(None)
- print('USGS_HazardCurve.fetch_url: cannot get the data')
+ print('USGS_HazardCurve.fetch_url: cannot get the data') # noqa: T201
return False
return True
- def get_hazard_curve(self):
+ def get_hazard_curve(self): # noqa: D102
cur_ims = []
cur_mafs = []
cur_rps = []
@@ -289,4 +289,4 @@ def get_hazard_curve(self):
dict_hc = {'SiteID': self.tag, 'ReturnPeriod': self.rps, 'IM': self.ims}
- return dict_hc
+ return dict_hc # noqa: RET504
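S113 flags the `requests.get` calls above for lacking a timeout, so a stalled endpoint would hang the run; the retry branch also repeats the same call. A consolidated sketch with hypothetical attempt and timeout defaults:

import requests

def fetch_json(url, attempts=10, timeout=30):
    for _ in range(attempts):
        try:
            res = requests.get(url, timeout=timeout)
        except requests.RequestException:
            continue
        if res.status_code == requests.codes.ok:
            return res.json()
    return None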
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py
index 643529922..8280f3c00 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/CorrelationModel.py
@@ -1,4 +1,4 @@
-#
+# # noqa: N999, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -44,7 +44,7 @@
from scipy.interpolate import interp1d, interp2d
-def baker_jayaram_correlation_2008(im1, im2, flag_orth=False):
+def baker_jayaram_correlation_2008(im1, im2, flag_orth=False): # noqa: FBT002, C901
"""Computing inter-event correlation coeffcieint between Sa of two periods
Reference:
Baker and Jayaram (2008) Correlation of Spectral Acceleration
@@ -58,46 +58,46 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False):
rho: correlation coefficient
Note:
The valid range of T1 and T2 is 0.01s ~ 10.0s
- """
+ """ # noqa: D205, D400, D401
# Parse periods from im1 and im2
if im1.startswith('SA'):
- T1 = float(im1[3:-1])
+ T1 = float(im1[3:-1]) # noqa: N806
elif im1.startswith('PGA'):
- T1 = 0.0
+ T1 = 0.0 # noqa: N806
else:
return 0.0
if im2.startswith('SA'):
- T2 = float(im2[3:-1])
+ T2 = float(im2[3:-1]) # noqa: N806
elif im2.startswith('PGA'):
- T2 = 0.0
+ T2 = 0.0 # noqa: N806
else:
return 0.0
# Compute Tmin and Tmax (lower bounds 0.01 for T < 0.01)
- Tmin = max(min([T1, T2]), 0.01)
- Tmax = max(max([T1, T2]), 0.01)
+ Tmin = max(min([T1, T2]), 0.01) # noqa: N806
+ Tmax = max(max([T1, T2]), 0.01) # noqa: N806, PLW3301
# Coefficient C1
- C1 = 1.0 - np.cos(np.pi / 2.0 - 0.366 * np.log(Tmax / max([Tmin, 0.109])))
+ C1 = 1.0 - np.cos(np.pi / 2.0 - 0.366 * np.log(Tmax / max([Tmin, 0.109]))) # noqa: N806
# Coefficient C2
- if Tmax < 0.2:
- C2 = 1.0 - 0.105 * (1.0 - 1.0 / (1.0 + np.exp(100.0 * Tmax - 5.0))) * (
+ if Tmax < 0.2: # noqa: PLR2004
+ C2 = 1.0 - 0.105 * (1.0 - 1.0 / (1.0 + np.exp(100.0 * Tmax - 5.0))) * ( # noqa: N806
Tmax - Tmin
) / (Tmax - 0.0099)
else:
- C2 = 0.0
+ C2 = 0.0 # noqa: N806
# Coefficient C3
- if Tmax < 0.109:
- C3 = C2
+ if Tmax < 0.109: # noqa: PLR2004
+ C3 = C2 # noqa: N806
else:
- C3 = C1
+ C3 = C1 # noqa: N806
# Coefficient C4
- C4 = C1 + 0.5 * (np.sqrt(C3) - C3) * (1.0 + np.cos(np.pi * Tmin / 0.109))
+ C4 = C1 + 0.5 * (np.sqrt(C3) - C3) * (1.0 + np.cos(np.pi * Tmin / 0.109)) # noqa: N806
# rho for a single component
- if Tmax <= 0.109:
+ if Tmax <= 0.109: # noqa: PLR2004
rho = C2
- elif Tmin > 0.109:
+ elif Tmin > 0.109: # noqa: PLR2004
rho = C1
- elif Tmax < 0.2:
+ elif Tmax < 0.2: # noqa: PLR2004
rho = min([C2, C4])
else:
rho = C4
@@ -108,7 +108,7 @@ def baker_jayaram_correlation_2008(im1, im2, flag_orth=False):
return rho
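A usage sketch for the function above, using the 'SA(T)'/'PGA' string format it parses (periods in seconds):

rho = baker_jayaram_correlation_2008('SA(0.3)', 'SA(1.0)')  # Sa-Sa pair
rho_pga = baker_jayaram_correlation_2008('PGA', 'SA(1.0)')  # PGA maps to T = 0, clipped to 0.01 s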
-def bradley_correlation_2011(IM, T=None, flag_Ds=True):
+def bradley_correlation_2011(IM, T=None, flag_Ds=True): # noqa: FBT002, C901, N803, PLR0911
"""Computing inter-event correlation coeffcieint between Sa(T) and Ds575/D595
Reference:
Bradley (2011) Correlation of Significant Duration with Amplitude and
@@ -122,105 +122,105 @@ def bradley_correlation_2011(IM, T=None, flag_Ds=True):
rho: correlation coefficient
Note:
The valid range of T is 0.01s ~ 10.0s
- """
+ """ # noqa: D205, D400, D401
# PGA
- if IM == 'PGA':
+ if IM == 'PGA': # noqa: RET503
if flag_Ds:
return -0.442
- else:
+ else: # noqa: RET505
return -0.305
elif IM == 'PGV':
if flag_Ds:
return -0.259
- else:
+ else: # noqa: RET505
return -0.211
elif IM == 'ASI':
if flag_Ds:
return -0.411
- else:
+ else: # noqa: RET505
return -0.370
elif IM == 'SI':
if flag_Ds:
return -0.131
- else:
+ else: # noqa: RET505
return -0.079
elif IM == 'DSI':
if flag_Ds:
return 0.074
- else:
+ else: # noqa: RET505
return 0.163
elif IM == 'CAV':
if flag_Ds:
return 0.077
- else:
+ else: # noqa: RET505
return 0.122
elif IM == 'Ds595':
if flag_Ds:
return 0.843
- else:
+ else: # noqa: RET505
return None
elif IM == 'Sa':
if flag_Ds:
- if T < 0.09:
+ if T < 0.09: # noqa: PLR2004
a_p = -0.45
a_c = -0.39
b_p = 0.01
b_c = 0.09
- elif T < 0.30:
+ elif T < 0.30: # noqa: PLR2004
a_p = -0.39
a_c = -0.39
b_p = 0.09
b_c = 0.30
- elif T < 1.40:
+ elif T < 1.40: # noqa: PLR2004
a_p = -0.39
a_c = -0.06
b_p = 0.30
b_c = 1.40
- elif T < 6.50:
+ elif T < 6.50: # noqa: PLR2004
a_p = -0.06
a_c = 0.16
b_p = 1.40
b_c = 6.50
- elif T <= 10.0:
+ elif T <= 10.0: # noqa: PLR2004
a_p = 0.16
a_c = 0.00
b_p = 6.50
b_c = 10.00
- elif T < 0.04:
+ elif T < 0.04: # noqa: PLR2004
a_p = -0.41
a_c = -0.41
b_p = 0.01
b_c = 0.04
- elif T < 0.08:
+ elif T < 0.08: # noqa: PLR2004
a_p = -0.41
a_c = -0.38
b_p = 0.04
b_c = 0.08
- elif T < 0.26:
+ elif T < 0.26: # noqa: PLR2004
a_p = -0.38
a_c = -0.35
b_p = 0.08
b_c = 0.26
- elif T < 1.40:
+ elif T < 1.40: # noqa: PLR2004
a_p = -0.35
a_c = -0.02
b_p = 0.26
b_c = 1.40
- elif T <= 6.00:
+ elif T <= 6.00: # noqa: PLR2004
a_p = -0.02
a_c = 0.23
b_p = 1.40
b_c = 6.00
- elif T <= 10.00:
+ elif T <= 10.00: # noqa: PLR2004
a_p = 0.23
a_c = 0.02
b_p = 6.00
b_c = 10.0
rho = a_p + np.log(T / b_p) / np.log(b_c / b_p) * (a_c - a_p)
- return rho
+ return rho # noqa: RET504
-def jayaram_baker_correlation_2009(im, h, flag_clustering=False):
+def jayaram_baker_correlation_2009(im, h, flag_clustering=False): # noqa: FBT002
"""Computing intra-event correlation coeffcieint between Sa(T) at two sites
Reference:
Jayaram and Baker (2009) Correlation model for spatially distributed
@@ -232,16 +232,16 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False):
the region (default: false)
Output:
rho: correlation between normalized intra-event residuals
- """
+ """ # noqa: D205, D400, D401
# parse period form im
try:
# for Sa
if im.startswith('SA'):
- T = float(im[3:-1])
+ T = float(im[3:-1]) # noqa: N806
elif im.startswith('PGA'):
- T = 0.0
+ T = 0.0 # noqa: N806
except ValueError:
- print(
+ print( # noqa: T201
f'CorrelationModel.jayaram_baker_correlation_2009: error - cannot handle {im}'
)
@@ -252,7 +252,7 @@ def jayaram_baker_correlation_2009(im, h, flag_clustering=False):
else:
b = 40.7 - 15.0 * T
rho = np.exp(-3.0 * h / b)
- return rho
+ return rho # noqa: RET504
def load_loth_baker_correlation_2013(datapath):
@@ -266,14 +266,14 @@ def load_loth_baker_correlation_2013(datapath):
B1: short-range coregionalization matrix
B2: long-range coregionalization matrix
B3: Nugget effect coregionalization matrix
- """
- B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0)
- B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0)
- B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0)
+ """ # noqa: D205, D400, D401
+ B2 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B2.csv', header=0) # noqa: N806
+ B1 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B1.csv', header=0) # noqa: N806
+ B3 = pd.read_csv(datapath + 'loth_baker_correlation_2013_B3.csv', header=0) # noqa: N806
return B1, B2, B3
-def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3):
+def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3): # noqa: N803
"""Computing intra-event correlation coeffcieint between Sa(Ti) and Sa(Tj)
at two sites
Reference:
@@ -290,7 +290,7 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3):
rho: correlation between Sa(Ti) and Sa(Tj) at two sites
Note:
The valid range for T1 and T2 is 0.01s ~ 10.0s
- """
+ """ # noqa: D205, D400, D401
# Interpolation functions
f1 = interp2d(B1['Period (s)'], B1['Period (s)'], B1.iloc[:, 1:])
f2 = interp2d(B2['Period (s)'], B2['Period (s)'], B2.iloc[:, 1:])
@@ -300,13 +300,13 @@ def compute_rho_loth_baker_correlation_2013(T1, T2, h, B1, B2, B3):
b2 = f2(T1, T2)
b3 = f3(T1, T2)
# Covariance functions
- Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0)
+ Ch = b1 * np.exp(-3.0 * h / 20.0) + b2 * np.exp(-3.0 * h / 70.0) + b3 * (h == 0) # noqa: N806
# Correlation coefficient
rho = Ch
- return rho
+ return rho # noqa: RET504
-def loth_baker_correlation_2013(stations, im_name_list, num_simu):
+def loth_baker_correlation_2013(stations, im_name_list, num_simu): # noqa: C901
"""Simulating intra-event residuals
Reference:
Loth and Baker (2013) A spatial cross-correlation model of spectral
@@ -319,7 +319,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu):
residuals: intra-event residuals
Note:
The valid range for T1 and T2 is 0.01s ~ 10.0s
- """
+ """ # noqa: D205, D400, D401
# Parse periods from intensity measure list
periods = []
for cur_im in im_name_list:
@@ -328,13 +328,13 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu):
periods.append(float(cur_im[3:-1]))
elif cur_im.startswith('PGA'):
periods.append(0.0)
- except ValueError:
- print(
+ except ValueError: # noqa: PERF203
+ print( # noqa: T201
f'CorrelationModel.loth_baker_correlation_2013: error - cannot handle {cur_im}'
)
# Loading modeling coefficients
- B1, B2, B3 = load_loth_baker_correlation_2013(
- os.path.dirname(__file__) + '/data/'
+ B1, B2, B3 = load_loth_baker_correlation_2013( # noqa: N806
+ os.path.dirname(__file__) + '/data/' # noqa: PTH120
)
# Computing distance matrix
num_stations = len(stations)
@@ -346,7 +346,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu):
stn_dist[i, j] = get_distance_from_lat_lon(loc_i, loc_j)
# Creating the covariance matrix across all stations and periods
num_periods = len(periods)
- covMatrix = np.zeros((num_stations * num_periods, num_stations * num_periods))
+ covMatrix = np.zeros((num_stations * num_periods, num_stations * num_periods)) # noqa: N806
for i in range(num_periods):
for j in range(num_periods):
covMatrix[
@@ -364,7 +364,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu):
tmp = []
for j in range(num_stations):
for k in range(num_periods):
- tmp.append(residuals_raw[i, j + k * num_stations])
+ tmp.append(residuals_raw[i, j + k * num_stations]) # noqa: PERF401
residuals_reorder.append(tmp)
residuals_reorder = np.array(residuals_reorder)
residuals = (
@@ -373,7 +373,7 @@ def loth_baker_correlation_2013(stations, im_name_list, num_simu):
.swapaxes(1, 2)
)
# return
- return residuals
+ return residuals # noqa: RET504
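The core of the simulation above is a draw from a multivariate normal with the assembled station-by-period covariance. A self-contained miniature of that step, with a hypothetical two-site covariance:

import numpy as np

cov = np.array([[1.0, 0.6],
                [0.6, 1.0]])  # hypothetical 2-site covariance
rng = np.random.default_rng(42)
residuals = rng.multivariate_normal(np.zeros(2), cov, size=1000)
print(np.corrcoef(residuals.T)[0, 1])  # close to 0.6 for a large sample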
def load_markhvida_ceferino_baker_correlation_2017(datapath):
@@ -388,18 +388,18 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath):
MCB_model: model coeff.
MCB_pca: pca coeff.
MCB_var: var scale
- """
- MCB_model = pd.read_csv(
+ """ # noqa: D205, D400, D401
+ MCB_model = pd.read_csv( # noqa: N806
datapath + 'markhvida_ceferino_baker_correlation_2017_model_coeff.csv',
index_col=None,
header=0,
)
- MCB_pca = pd.read_csv(
+ MCB_pca = pd.read_csv( # noqa: N806
datapath + 'markhvida_ceferino_baker_correlation_2017_pca_coeff.csv',
index_col=None,
header=0,
)
- MCB_var = pd.read_csv(
+ MCB_var = pd.read_csv( # noqa: N806
datapath + 'markhvida_ceferino_baker_correlation_2017_var_scale.csv',
index_col=None,
header=0,
@@ -407,7 +407,7 @@ def load_markhvida_ceferino_baker_correlation_2017(datapath):
return MCB_model, MCB_pca, MCB_var
-def markhvida_ceferino_baker_correlation_2017(
+def markhvida_ceferino_baker_correlation_2017( # noqa: C901
stations,
im_name_list,
num_simu,
@@ -427,7 +427,7 @@ def markhvida_ceferino_baker_correlation_2017(
residuals: intra-event residuals
Note:
The valid range for T1 and T2 is 0.01s ~ 5.0s
- """
+ """ # noqa: D205, D400, D401
# Parse periods from intensity measure list
periods = []
for cur_im in im_name_list:
@@ -437,16 +437,16 @@ def markhvida_ceferino_baker_correlation_2017(
elif cur_im.startswith('PGA'):
periods.append(0.0)
else:
- raise ValueError(
- f'CorrelationModel Markhvida et al. (2017): error - cannot handle {cur_im}'
+ raise ValueError( # noqa: TRY003, TRY301
+ f'CorrelationModel Markhvida et al. (2017): error - cannot handle {cur_im}' # noqa: EM102
)
- except ValueError:
- print(
+ except ValueError: # noqa: PERF203
+ print( # noqa: T201
f'CorrelationModel.markhvida_ceferino_baker_correlation_2017: error - cannot handle {cur_im}'
)
# Loading factors
- MCB_model, MCB_pca, MCB_var = load_markhvida_ceferino_baker_correlation_2017(
- os.path.dirname(__file__) + '/data/'
+ MCB_model, MCB_pca, MCB_var = load_markhvida_ceferino_baker_correlation_2017( # noqa: N806
+ os.path.dirname(__file__) + '/data/' # noqa: PTH120
)
c0 = MCB_model.loc[MCB_model['Type'] == 'c0']
c0 = c0[c0.keys()[1:]]
@@ -473,7 +473,7 @@ def markhvida_ceferino_baker_correlation_2017(
c1 = c1 / MCB_var.iloc[0, num_pc - 1]
c2 = c2 / MCB_var.iloc[0, num_pc - 1]
# Creating covariance matrices for each of the principal components
- covMatrix = np.zeros((num_stations, num_stations, num_pc))
+ covMatrix = np.zeros((num_stations, num_stations, num_pc)) # noqa: N806
for i in range(num_pc):
if c1.iloc[0, i] == 0:
# nug
@@ -494,7 +494,7 @@ def markhvida_ceferino_baker_correlation_2017(
).T
# Interpolating model_coef by periods
interp_fun = interp1d(model_periods, model_coef, axis=0)
- model_Tmax = 5.0
+ model_Tmax = 5.0 # noqa: N806
simu_periods = [i for i in periods if i <= model_Tmax]
if (len(simu_periods) == 1) and (simu_periods[0] == 0):
# for PGA only (using 0.01 sec as the approximate)
@@ -511,8 +511,8 @@ def markhvida_ceferino_baker_correlation_2017(
)
# Appending residuals for periods greater than model_Tmax (fixing at 5.0)
if max(periods) > model_Tmax:
- Tmax_coef = interp_fun(model_Tmax)
- Tmax_residuals = np.empty([num_stations, 1, num_simu])
+ Tmax_coef = interp_fun(model_Tmax) # noqa: N806
+ Tmax_residuals = np.empty([num_stations, 1, num_simu]) # noqa: N806
for i in range(num_simu):
Tmax_residuals[:, :, i] = np.matmul(
residuals_pca[:, i, :], np.matrix(Tmax_coef).T
@@ -536,16 +536,16 @@ def load_du_ning_correlation_2021(datapath):
DN_model: model coeff.
DN_pca: pca coeff.
DN_var: var of pca
- """
- DN_model = pd.read_csv(
+ """ # noqa: D205, D400, D401
+ DN_model = pd.read_csv( # noqa: N806
datapath + 'du_ning_correlation_2021_model_coeff.csv',
index_col=None,
header=0,
)
- DN_pca = pd.read_csv(
+ DN_pca = pd.read_csv( # noqa: N806
datapath + 'du_ning_correlation_2021_pca_coeff.csv', index_col=None, header=0
)
- DN_var = pd.read_csv(
+ DN_var = pd.read_csv( # noqa: N806
datapath + 'du_ning_correlation_2021_var_scale.csv', index_col=None, header=0
)
return DN_model, DN_pca, DN_var
@@ -566,7 +566,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23):
residuals: intra-event residuals
Note:
The valid range for T1 and T2 is 0.01s ~ 5.0s
- """
+ """ # noqa: D205, D400, D401
# Parse periods_ims from intensity measure list
periods_ims = []
for cur_im in im_name_list:
@@ -575,8 +575,8 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23):
else:
periods_ims.append(cur_im)
# Loading factors
- DN_model, DN_pca, DN_var = load_du_ning_correlation_2021(
- os.path.dirname(__file__) + '/data/'
+ DN_model, DN_pca, DN_var = load_du_ning_correlation_2021( # noqa: N806
+ os.path.dirname(__file__) + '/data/' # noqa: PTH120
)
c1 = DN_model.loc[DN_model['Type'] == 'c1']
c1 = c1[c1.keys()[1:]]
@@ -610,7 +610,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23):
a1 = a1 / DN_var.iloc[0, num_pc - 1]
a2 = a2 / DN_var.iloc[0, num_pc - 1]
# Creating covariance matrices for each of the principal components
- covMatrix = np.zeros((num_stations, num_stations, num_pc))
+ covMatrix = np.zeros((num_stations, num_stations, num_pc)) # noqa: N806
for i in range(num_pc):
if a1.iloc[0, i] == 0:
# nug
@@ -630,17 +630,17 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23):
mu, covMatrix[:, :, i], num_simu
).T
# Interpolating model_coef by periods
- pseudo_periods = [x for x in model_periods if type(x) == float] + [
+ pseudo_periods = [x for x in model_periods if type(x) == float] + [ # noqa: E721
ims_map[x]
for x in model_periods
- if type(x) == str
+ if type(x) == str # noqa: E721
]
interp_fun = interp1d(pseudo_periods, model_coef, axis=0)
- model_Tmax = 10.0
- simu_periods = [min(i, model_Tmax) for i in periods_ims if type(i) == float] + [
+ model_Tmax = 10.0 # noqa: N806
+ simu_periods = [min(i, model_Tmax) for i in periods_ims if type(i) == float] + [ # noqa: E721
ims_map[i]
for i in periods_ims
- if type(i) == str
+ if type(i) == str # noqa: E721
]
if (len(simu_periods) == 1) and (simu_periods[0] == 0):
# for PGA only (using 0.01 sec as the approximate)
@@ -660,7 +660,7 @@ def du_ning_correlation_2021(stations, im_name_list, num_simu, num_pc=23):
return residuals
-def baker_bradley_correlation_2017(im1=None, im2=None):
+def baker_bradley_correlation_2017(im1=None, im2=None): # noqa: C901
"""Correlation between Sa and other IMs
Baker, J. W., and Bradley, B. A. (2017). “Intensity measure correlations observed in
the NGA-West2 database, and dependence of correlations on rupture and site parameters.”
@@ -671,7 +671,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
im2: 2nd intensity measure name
Output:
rho: correlation coefficient
- """
+ """ # noqa: D205, D400
# im map:
im_map = {'DS575H': 0, 'DS595H': 1, 'PGA': 2, 'PGV': 3}
@@ -683,7 +683,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
else:
tmp_tag = im_map.get(im1.upper(), None)
if tmp_tag is None:
- print(
+ print( # noqa: T201
f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im1}'
)
return 0.0
@@ -695,7 +695,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
else:
tmp_tag = im_map.get(im2.upper(), None)
if tmp_tag is None:
- print(
+ print( # noqa: T201
f'CorrelationModel.baker_bradley_correlation_2017: warning - return 0.0 for unknown {im2}'
)
return 0.0
@@ -720,7 +720,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
# one Sa + one non-Sa
im_list.remove('SA')
im_tag = im_list[0]
- T = [x for x in period_list if x is not None][0]
+ T = [x for x in period_list if x is not None][0] # noqa: N806, RUF015
# modeling coefficients
a = [
[0.00, -0.45, -0.39, -0.39, -0.06, 0.16],
@@ -744,7 +744,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
]
# rho
- if im_tag < 2:
+ if im_tag < 2: # noqa: PLR2004
for j in range(1, len(e[im_tag])):
if e[im_tag][j] >= T:
rho = a[im_tag][j] + (b[im_tag][j] - a[im_tag][j]) / np.log(
@@ -763,7 +763,7 @@ def baker_bradley_correlation_2017(im1=None, im2=None):
return rho
-def get_distance_from_lat_lon(site_loc1, site_loc2):
+def get_distance_from_lat_lon(site_loc1, site_loc2): # noqa: D103
# earth radius (km)
earth_radius_avg = 6371.0
# site lat and lon
@@ -788,4 +788,4 @@ def get_distance_from_lat_lon(site_loc1, site_loc2):
)
)
# return
- return dist
+ return dist # noqa: RET504
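A usage sketch for the helper above, assuming the sites are (lat, lon) tuples in degrees; Berkeley to Stanford should come out near 50 km:

dist_km = get_distance_from_lat_lon((37.8716, -122.2727), (37.4275, -122.1697))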
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py
index 2be9fd6f3..57f02c96b 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/SignificantDurationModel.py
@@ -1,4 +1,4 @@
-#
+# # noqa: N999, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -43,7 +43,7 @@
def abrahamson_silva_ds_1999(
magnitude=7.0,
distance=10.0,
- soil=True,
+ soil=True, # noqa: FBT002
duration_type='DS575H',
):
"""Significant duration model by Abrahamson and Silva (1996) Empirical ground motion
@@ -57,12 +57,12 @@ def abrahamson_silva_ds_1999(
Output:
log(ds_median): log(median) significant duration prediction
ds_sigma: logarithmic standard deviation of the prediction
- """
+ """ # noqa: D205, D400
# map the duration_type to integer key
dur_map = {'DS575H': 0, 'DS575V': 1, 'DS595H': 2, 'DS595V': 3}
dur_tag = dur_map.get(duration_type.upper(), None)
if dur_tag is None:
- print(
+ print( # noqa: T201
"SignificantDurationModel.abrahamson_silva_ds_1999: duration_type='DS575H','DS575V','DS595H','DS595V'?"
)
return None, None
@@ -74,7 +74,7 @@ def abrahamson_silva_ds_1999(
c1 = [0.805, 1.076, 0.805, 1.076]
c2 = [0.063, 0.107, 0.063, 0.107]
rc = [10, 10, 10, 10]
- Drat = [0.000, 0.000, 0.845, 0.646]
+ Drat = [0.000, 0.000, 0.845, 0.646] # noqa: N806
sigma = [0.55, 0.46, 0.49, 0.45]
# median
if distance > rc[dur_tag]:
@@ -132,12 +132,12 @@ def bommer_stafford_alarcon_ds_2009(
ds_sigma: logarithmic standard deviation of the prediction
ds_tau: within-event logarithmic standard deviation
ds_phi: between-event logarithmic standard deviation
- """
+ """ # noqa: D205, D400
# duration type map
dur_map = {'DS575H': 0, 'DS595H': 1}
dur_tag = dur_map.get(duration_type.upper(), None)
if dur_tag is None:
- print(
+ print( # noqa: T201
"SignificantDurationModel.bommer_stafford_alarcon_ds_2009: duration_type='DS575H','DS595H'?"
)
return None, None, None, None
@@ -150,10 +150,10 @@ def bommer_stafford_alarcon_ds_2009(
h1 = [-2.3316, 2.5000]
v1 = [-0.2900, -0.3478]
z1 = [-0.0522, -0.0365]
- tauCoeff = [0.3527, 0.3252]
- phiCoeff = [0.4304, 0.3460]
- sigma_c = [0.1729, 0.1114]
- sigma_Tgm = [0.5289, 0.4616]
+ tauCoeff = [0.3527, 0.3252] # noqa: N806
+ phiCoeff = [0.4304, 0.3460] # noqa: N806
+ sigma_c = [0.1729, 0.1114] # noqa: F841
+ sigma_Tgm = [0.5289, 0.4616] # noqa: N806
# median
ds_median = np.exp(
@@ -173,7 +173,7 @@ def bommer_stafford_alarcon_ds_2009(
return np.log(ds_median), ds_sigma, ds_tau, ds_phi
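A usage sketch for the duration models in this file, using `abrahamson_silva_ds_1999`, whose full signature appears above; it returns the log-median and the logarithmic standard deviation:

import numpy as np

log_ds, sigma = abrahamson_silva_ds_1999(
    magnitude=7.0, distance=10.0, soil=True, duration_type='DS575H'
)
ds_median = np.exp(log_ds)  # significant duration in seconds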
-def afshari_stewart_ds_2016(
+def afshari_stewart_ds_2016( # noqa: C901
magnitude=7.0,
distance=10.0,
vs30=760.0,
@@ -197,12 +197,12 @@ def afshari_stewart_ds_2016(
ds_sigma: logarithmic standard deviation of the prediction
ds_tau: within-event logarithmic standard deviation
ds_phi: between-event logarithmic standard deviation
- """
+ """ # noqa: D205, D400
# mechanism map
mech_map = {'unknown': 0, 'normal': 1, 'reverse': 2, 'strike-slip': 3}
mech_tag = mech_map.get(mechanism.lower(), None)
if mech_tag is None:
- print(
+ print( # noqa: T201
"SignificantDurationModel.afshari_stewart_ds_2016: mechanism='unknown','normal','reverse','strike-slip'?"
)
return None, None, None, None
@@ -210,7 +210,7 @@ def afshari_stewart_ds_2016(
reg_map = {'california': 0, 'japan': 1, 'other': 2}
reg_tag = reg_map.get(region.lower(), None)
if reg_tag is None:
- print(
+ print( # noqa: T201
"SignificantDurationModel.afshari_stewart_ds_2016: region='california', 'japan', 'other'?"
)
return None, None, None, None
@@ -218,14 +218,14 @@ def afshari_stewart_ds_2016(
dur_map = {'DS575H': 0, 'DS595H': 1, 'DS2080H': 2}
dur_tag = dur_map.get(duration_type.upper(), None)
if dur_tag is None:
- print(
+ print( # noqa: T201
"SignificantDurationModel.afshari_stewart_ds_2016: duration_type='DS575H','DS595H','DS2080H'?"
)
return None, None, None, None
# source coefficients
- M1 = [5.35, 5.20, 5.20]
- M2 = [7.15, 7.40, 7.40]
+ M1 = [5.35, 5.20, 5.20] # noqa: N806
+ M2 = [7.15, 7.40, 7.40] # noqa: N806
b0 = [
[1.2800, 2.1820, 0.8822],
[1.5550, 2.5410, 1.4090],
@@ -240,17 +240,17 @@ def afshari_stewart_ds_2016(
]
b2 = [0.9011, 0.9443, 0.7414]
b3 = [-1.684, -3.911, -3.164]
- Mstar = [6, 6, 6]
+ Mstar = [6, 6, 6] # noqa: N806
# path coefficients
c1 = [0.1159, 0.3165, 0.0646]
- RR1 = [10, 10, 10]
- RR2 = [50, 50, 50]
+ RR1 = [10, 10, 10] # noqa: N806
+ RR2 = [50, 50, 50] # noqa: N806
c2 = [0.1065, 0.2539, 0.0865]
c3 = [0.0682, 0.0932, 0.0373]
# site coefficients
c4 = [-0.2246, -0.3183, -0.4237]
- Vref = [368.2, 369.9, 369.6]
- V1 = [600, 600, 600]
+ Vref = [368.2, 369.9, 369.6] # noqa: N806
+ V1 = [600, 600, 600] # noqa: N806
c5 = [0.0006, 0.0006, 0.0005]
dz1ref = [200, 200, 200]
# standard deviation coefficients
@@ -269,67 +269,67 @@ def afshari_stewart_ds_2016(
-5.23 / 4 * np.log((vs30**4 + 412.39**4) / (1360**4 + 412.39**4))
)
# differential basin depth
- if z1 is None or z1 < 0 or reg_tag == 2:
+ if z1 is None or z1 < 0 or reg_tag == 2: # noqa: PLR2004
dz1 = 0
else:
dz1 = z1 - mu_z1
# source term
if magnitude < M1[dur_tag]:
- F_E = b0[mech_tag][dur_tag]
+ F_E = b0[mech_tag][dur_tag] # noqa: N806
else:
if magnitude < M2[dur_tag]:
- deltaSigma = np.exp(
+ deltaSigma = np.exp( # noqa: N806
b1[mech_tag][dur_tag] + b2[dur_tag] * (magnitude - Mstar[dur_tag])
)
else:
- deltaSigma = np.exp(
+ deltaSigma = np.exp( # noqa: N806
b1[mech_tag][dur_tag]
+ b2[dur_tag] * (M2[dur_tag] - Mstar[dur_tag])
+ b3[dur_tag] * (magnitude - M2[dur_tag])
)
- M_0 = 10 ** (1.5 * magnitude + 16.05)
+ M_0 = 10 ** (1.5 * magnitude + 16.05) # noqa: N806
f_0 = 4.9e6 * 3.2 * (deltaSigma / M_0) ** (1 / 3)
- F_E = 1 / f_0
+ F_E = 1 / f_0 # noqa: N806
# path term
if distance < RR1[dur_tag]:
- F_P = c1[dur_tag] * distance
+ F_P = c1[dur_tag] * distance # noqa: N806
elif distance < RR2[dur_tag]:
- F_P = c1[dur_tag] * RR1[dur_tag] + c2[dur_tag] * (distance - RR1[dur_tag])
+ F_P = c1[dur_tag] * RR1[dur_tag] + c2[dur_tag] * (distance - RR1[dur_tag]) # noqa: N806
else:
- F_P = (
+ F_P = ( # noqa: N806
c1[dur_tag] * RR1[dur_tag]
+ c2[dur_tag] * (RR2[dur_tag] - RR1[dur_tag])
+ c3[dur_tag] * (distance - RR2[dur_tag])
)
# F_deltaz term
if dz1 <= dz1ref[dur_tag]:
- F_deltaz = c5[dur_tag] * dz1
+ F_deltaz = c5[dur_tag] * dz1 # noqa: N806
else:
- F_deltaz = c5[dur_tag] * dz1ref[dur_tag]
+ F_deltaz = c5[dur_tag] * dz1ref[dur_tag] # noqa: N806
# site term
if vs30 < V1[dur_tag]:
- F_S = c4[dur_tag] * np.log(vs30 / Vref[dur_tag]) + F_deltaz
+ F_S = c4[dur_tag] * np.log(vs30 / Vref[dur_tag]) + F_deltaz # noqa: N806
else:
- F_S = c4[dur_tag] * np.log(V1[dur_tag] / Vref[dur_tag]) + F_deltaz
+ F_S = c4[dur_tag] * np.log(V1[dur_tag] / Vref[dur_tag]) + F_deltaz # noqa: N806
# median
ds_median = np.exp(np.log(F_E + F_P) + F_S)
# standard deviations
# between event
- if magnitude < 5.5:
+ if magnitude < 5.5: # noqa: PLR2004
ds_phi = phi1[dur_tag]
- elif magnitude < 5.75:
+ elif magnitude < 5.75: # noqa: PLR2004
ds_phi = phi1[dur_tag] + (phi2[dur_tag] - phi1[dur_tag]) * (
magnitude - 5.5
) / (5.75 - 5.5)
else:
ds_phi = phi2[dur_tag]
# within event
- if magnitude < 6.5:
+ if magnitude < 6.5: # noqa: PLR2004
ds_tau = tau1[dur_tag]
- elif magnitude < 7:
+ elif magnitude < 7: # noqa: PLR2004
ds_tau = tau1[dur_tag] + (tau2[dur_tag] - tau1[dur_tag]) * (
magnitude - 6.5
) / (7 - 6.5)
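The many PLR2004 suppressions in this function mark bare magnitude breakpoints such as 5.5, 5.75, 6.5, and 7. A sketch of the named-constant refactor the rule points toward, applied to the within-event branch above (the trailing else is assumed to mirror the phi branch):

M_TAU_LO = 6.5  # below this, tau is tau1
M_TAU_HI = 7.0  # above this, tau is tau2

if magnitude < M_TAU_LO:
    ds_tau = tau1[dur_tag]
elif magnitude < M_TAU_HI:
    ds_tau = tau1[dur_tag] + (tau2[dur_tag] - tau1[dur_tag]) * (
        magnitude - M_TAU_LO
    ) / (M_TAU_HI - M_TAU_LO)
else:
    ds_tau = tau2[dur_tag]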
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/__init__.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/__init__.py
index 8285dcd58..317284e43 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/__init__.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/__init__.py
@@ -1,4 +1,4 @@
-#
+# # noqa: D104
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py
index 774f35ba4..a2090477b 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/gmpe/openSHAGMPE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: N999, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,14 +46,14 @@
# Chiou and Youngs (2014)
-class chiou_youngs_2013:
- timeSetImt = 0
- timeCalc = 0
- supportedImt = None
+class chiou_youngs_2013: # noqa: D101
+ timeSetImt = 0 # noqa: N815
+ timeCalc = 0 # noqa: N815
+ supportedImt = None # noqa: N815
def __init__(self):
self.coeff = pd.read_csv(
- os.path.join(os.path.dirname(__file__), 'data', 'CY14.csv')
+ os.path.join(os.path.dirname(__file__), 'data', 'CY14.csv') # noqa: PTH118, PTH120
)
self.coeff.iloc[:-2, 0] = self.coeff.iloc[:-2, 0].apply(lambda x: float(x))
self.coeff = self.coeff.set_index('T')
@@ -72,7 +72,7 @@ def __init__(self):
self.B = np.power(1360, 4) + self.A
self.CRBsq = self.CRB * self.CRB
- def setIMT(self, imt):
+ def setIMT(self, imt): # noqa: N802, D102
if imt not in self.supportedImt:
sys.exit(f'The imt {imt} is not supported by Chiou and Youngs (2014)')
return False
@@ -110,18 +110,18 @@ def setIMT(self, imt):
return True
# Center zTop on the zTop-M relation -- Equations 4, 5
- def calcMwZtop(self, style, Mw):
- mzTop = 0.0
+ def calcMwZtop(self, style, Mw): # noqa: N802, N803, D102
+ mzTop = 0.0 # noqa: N806
if style == 'REVERSE':
- if Mw <= 5.849:
- mzTop = 2.704
+ if Mw <= 5.849: # noqa: PLR2004
+ mzTop = 2.704 # noqa: N806
else:
- mzTop = max(2.704 - 1.226 * (Mw - 5.849), 0)
+ mzTop = max(2.704 - 1.226 * (Mw - 5.849), 0) # noqa: N806
else:
- mzTop = 2.673 if (Mw <= 4.970) else max(2.673 - 1.136 * (Mw - 4.970), 0)
+ mzTop = 2.673 if (Mw <= 4.970) else max(2.673 - 1.136 * (Mw - 4.970), 0) # noqa: N806, PLR2004
return mzTop * mzTop
- def calcSAref(self, Mw, rJB, rRup, rX, dip, zTop, style):
+ def calcSAref(self, Mw, rJB, rRup, rX, dip, zTop, style): # noqa: N802, N803, D102
# Magnitude scaling
r1 = (
self.c1
@@ -137,10 +137,10 @@ def calcSAref(self, Mw, rJB, rRup, rX, dip, zTop, style):
gamma = self.cgamma1 + self.cgamma2 / np.cosh(max(Mw - self.cgamma3, 0.0))
r3 = self.dC4 * np.log(np.sqrt(rRup * rRup + self.CRBsq)) + rRup * gamma
# Scaling with other source variables
- coshM = np.cosh(2 * max(Mw - 4.5, 0))
- cosDelta = np.cos(dip * np.pi / 180.0)
+ coshM = np.cosh(2 * max(Mw - 4.5, 0)) # noqa: N806
+ cosDelta = np.cos(dip * np.pi / 180.0) # noqa: N806
# Center zTop on the zTop-M relation
- deltaZtop = zTop - self.calcMwZtop(style, Mw)
+ deltaZtop = zTop - self.calcMwZtop(style, Mw) # noqa: N806
r4 = (self.c7 + self.c7b / coshM) * deltaZtop + (
self.C11 + self.c11b / coshM
) * cosDelta * cosDelta
@@ -161,50 +161,50 @@ def calcSAref(self, Mw, rJB, rRup, rX, dip, zTop, style):
)
return np.exp(r1 + r2 + r3 + r4 + r5)
- def calcSoilNonLin(self, vs30):
+ def calcSoilNonLin(self, vs30): # noqa: N802, D102
exp1 = np.exp(self.phi3 * (min(vs30, 1130.0) - 360.0))
exp2 = np.exp(self.phi3 * (1130.0 - 360.0))
return self.phi2 * (exp1 - exp2)
- def calcZ1ref(self, vs30):
+ def calcZ1ref(self, vs30): # noqa: N802, D102
# -- Equation 18
- vsPow4 = vs30 * vs30 * vs30 * vs30
+ vsPow4 = vs30 * vs30 * vs30 * vs30 # noqa: N806
return np.exp(-7.15 / 4 * np.log((vsPow4 + self.A) / self.B)) / 1000.0 # km
- def calcDeltaZ1(self, z1p0, vs30):
+ def calcDeltaZ1(self, z1p0, vs30): # noqa: N802, D102
if np.isnan(z1p0):
return 0.0
return 1000.0 * (z1p0 - self.calcZ1ref(vs30))
# Mean ground motion model -- Equation 12
- def calcMean(self, vs30, z1p0, snl, saRef):
+ def calcMean(self, vs30, z1p0, snl, saRef): # noqa: N802, N803, D102
# Soil effect: linear response
sl = self.phi1 * min(np.log(vs30 / 1130.0), 0.0)
# Soil effect: nonlinear response (base passed in)
snl *= np.log((saRef + self.phi4) / self.phi4)
# Soil effect: sediment thickness
- dZ1 = self.calcDeltaZ1(z1p0, vs30)
+ dZ1 = self.calcDeltaZ1(z1p0, vs30) # noqa: N806
rkdepth = self.phi5 * (1.0 - np.exp(-dZ1 / self.PHI6))
return np.log(saRef) + sl + snl + rkdepth
- def calcNLOsq(self, snl, saRef):
- NL0 = snl * saRef / (saRef + self.phi4)
- NL0sq = (1 + NL0) * (1 + NL0)
- return NL0sq
+ def calcNLOsq(self, snl, saRef): # noqa: N802, N803, D102
+ NL0 = snl * saRef / (saRef + self.phi4) # noqa: N806
+ NL0sq = (1 + NL0) * (1 + NL0) # noqa: N806
+ return NL0sq # noqa: RET504
- def calcTauSq(self, NL0sq, mTest):
+ def calcTauSq(self, NL0sq, mTest): # noqa: N802, N803, D102
tau = self.tau1 + (self.tau2 - self.tau1) / 1.5 * mTest
- tauSq = tau * tau * NL0sq
- return tauSq
+ tauSq = tau * tau * NL0sq # noqa: N806
+ return tauSq # noqa: RET504
- def calcPhiSq(self, vsInf, NL0sq, mTest):
- sigmaNL0 = self.sigma1 + (self.sigma2 - self.sigma1) / 1.5 * mTest
- vsTerm = self.sigma3 if vsInf else 0.7
- sigmaNL0 *= np.sqrt(vsTerm + NL0sq)
- phiSq = sigmaNL0 * sigmaNL0
- return phiSq
+ def calcPhiSq(self, vsInf, NL0sq, mTest): # noqa: N802, N803, D102
+ sigmaNL0 = self.sigma1 + (self.sigma2 - self.sigma1) / 1.5 * mTest # noqa: N806
+ vsTerm = self.sigma3 if vsInf else 0.7 # noqa: N806
+ sigmaNL0 *= np.sqrt(vsTerm + NL0sq) # noqa: N806
+ phiSq = sigmaNL0 * sigmaNL0 # noqa: N806
+ return phiSq # noqa: RET504
- def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style):
+ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style): # noqa: N803
"""Preliminary implementation of the Chiou & Youngs (2013) next generation
attenuation relationship developed as part of NGA West II.
Input
@@ -224,54 +224,54 @@ def calc(self, Mw, rJB, rRup, rX, dip, zTop, vs30, vsInf, z1p0, style):
TotalStdDev
InterEvStdDev
IntraEvStdDev
- """
- saRef = self.calcSAref(Mw, rJB, rRup, rX, dip, zTop, style)
- soilNonLin = self.calcSoilNonLin(vs30)
+ """ # noqa: D205, D400
+ saRef = self.calcSAref(Mw, rJB, rRup, rX, dip, zTop, style) # noqa: N806
+ soilNonLin = self.calcSoilNonLin(vs30) # noqa: N806
mean = self.calcMean(vs30, z1p0, soilNonLin, saRef)
# Aleatory uncertainty model -- Equation 3.9
# Response Term - linear vs. non-linear
- NL0sq = self.calcNLOsq(soilNonLin, saRef)
+ NL0sq = self.calcNLOsq(soilNonLin, saRef) # noqa: N806
# Magnitude thresholds
- mTest = min(max(Mw, 5.0), 6.5) - 5.0
+ mTest = min(max(Mw, 5.0), 6.5) - 5.0 # noqa: N806
# Inter-event Term
- tauSq = self.calcTauSq(NL0sq, mTest)
+ tauSq = self.calcTauSq(NL0sq, mTest) # noqa: N806
# Intra-event term
- phiSq = self.calcPhiSq(vsInf, NL0sq, mTest)
+ phiSq = self.calcPhiSq(vsInf, NL0sq, mTest) # noqa: N806
- stdDev = np.sqrt(tauSq + phiSq)
+ stdDev = np.sqrt(tauSq + phiSq) # noqa: N806
return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq)
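A usage sketch for the class above, with hypothetical rupture and site values; `calc` takes the arguments in the order of its definition and returns log-space (mean, total, inter-event, intra-event) terms:

import numpy as np

gmpe = chiou_youngs_2013()
gmpe.setIMT('PGA')
mean, std, tau, phi = gmpe.calc(
    Mw=7.0, rJB=10.0, rRup=12.0, rX=10.0, dip=90.0, zTop=0.0,
    vs30=760.0, vsInf=True, z1p0=np.nan, style='STRIKE_SLIP'
)
median_pga = np.exp(mean)  # median ground motion in the model's units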
# https://github.com/opensha/opensha/blob/master/src/main/java/org/opensha/sha/imr/attenRelImpl/ngaw2/NGAW2_Wrapper.java#L220
- def getFaultFromRake(self, rake):
- if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45):
+ def getFaultFromRake(self, rake): # noqa: N802, D102
+ if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45): # noqa: PLR2004
return 'STRIKE_SLIP'
- elif rake >= 45 and rake <= 135:
+ elif rake >= 45 and rake <= 135: # noqa: RET505, PLR2004
return 'REVERSE'
else:
return 'NORMAL'
- def get_IM(self, Mw, site_rup_dict, site_info, im_info):
- vsInf = bool(site_info['vsInferred'])
+ def get_IM(self, Mw, site_rup_dict, site_info, im_info): # noqa: N802, N803, D102
+ vsInf = bool(site_info['vsInferred']) # noqa: N806
style = self.getFaultFromRake(site_rup_dict['aveRake'])
if 'SA' in im_info['Type']:
- cur_T = im_info.get('Periods', None)
+ cur_T = im_info.get('Periods', None) # noqa: N806
elif im_info['Type'] == 'PGA':
- cur_T = ['PGA']
+ cur_T = ['PGA'] # noqa: N806
elif im_info['Type'] == 'PGV':
- cur_T = ['PGV']
+ cur_T = ['PGV'] # noqa: N806
else:
- print(f'The IM type {im_info["Type"]} is not supported')
- meanList = []
- stdDevList = []
- InterEvStdDevList = []
- IntraEvStdDevList = []
- for Tj in cur_T:
+ print(f'The IM type {im_info["Type"]} is not supported') # noqa: T201
+ meanList = [] # noqa: N806
+ stdDevList = [] # noqa: N806
+ InterEvStdDevList = [] # noqa: N806
+ IntraEvStdDevList = [] # noqa: N806
+ for Tj in cur_T: # noqa: N806
start = time.process_time_ns()
self.setIMT(Tj)
self.timeSetImt += time.process_time_ns() - start
start = time.process_time_ns()
- mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc(
+ mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc( # noqa: N806
Mw,
site_info['rJB'],
site_info['rRup'],
@@ -288,13 +288,13 @@ def get_IM(self, Mw, site_rup_dict, site_info, im_info):
stdDevList.append(stdDev)
InterEvStdDevList.append(InterEvStdDev)
IntraEvStdDevList.append(IntraEvStdDev)
- saResult = {
+ saResult = { # noqa: N806
'Mean': meanList,
'TotalStdDev': stdDevList,
'InterEvStdDev': InterEvStdDevList,
'IntraEvStdDev': IntraEvStdDevList,
}
- return saResult
+ return saResult # noqa: RET504
# Station
# if station_info['Type'] == 'SiteList':
@@ -303,14 +303,14 @@ def get_IM(self, Mw, site_rup_dict, site_info, im_info):
# Abrahamson, Silva, and Kamai (2014)
-class abrahamson_silva_kamai_2014:
- timeSetImt = 0
- timeCalc = 0
- supportedImt = None
+class abrahamson_silva_kamai_2014: # noqa: D101
+ timeSetImt = 0 # noqa: N815
+ timeCalc = 0 # noqa: N815
+ supportedImt = None # noqa: N815
def __init__(self):
self.coeff = pd.read_csv(
- os.path.join(os.path.dirname(__file__), 'data', 'ASK14.csv')
+ os.path.join(os.path.dirname(__file__), 'data', 'ASK14.csv') # noqa: PTH118, PTH120
)
self.coeff.iloc[:-2, 0] = self.coeff.iloc[:-2, 0].apply(lambda x: float(x))
self.coeff = self.coeff.set_index('T')
@@ -335,7 +335,7 @@ def __init__(self):
self.H3 = -0.75
self.PHI_AMP_SQ = 0.16
- def setIMT(self, imt):
+ def setIMT(self, imt): # noqa: N802, D102
if imt not in self.supportedImt:
sys.exit(
f'The imt {imt} is not supported by Abrahamson, Silva, and Kamai (2014)'
@@ -369,79 +369,79 @@ def setIMT(self, imt):
self.M1 = self.coeff['M1'][imt]
self.Vlin = self.coeff['Vlin'][imt]
- def getV1(self):
+ def getV1(self): # noqa: N802, D102
try:
- if self.imt == 'PGA' or self.imt == 'PGV':
+ if self.imt == 'PGA' or self.imt == 'PGV': # noqa: PLR1714
return 1500.0
- if self.imt >= 3.0:
+ if self.imt >= 3.0: # noqa: PLR2004
return 800.0
- if self.imt > 0.5:
+ if self.imt > 0.5: # noqa: PLR2004
return np.exp(-0.35 * np.log(self.imt / 0.5) + np.log(1500.0))
- return 1500.0
- except:
+ return 1500.0 # noqa: TRY300
+ except: # noqa: E722
return 1500.0
- def calcZ1ref(self, vs30):
- vsPow4 = vs30 * vs30 * vs30 * vs30
+ def calcZ1ref(self, vs30): # noqa: N802, D102
+ vsPow4 = vs30 * vs30 * vs30 * vs30 # noqa: N806
return np.exp(-7.67 / 4.0 * np.log((vsPow4 + self.A) / self.B)) / 1000.0
- def calcSoilTerm(self, vs30, z1p0):
+ def calcSoilTerm(self, vs30, z1p0): # noqa: N802, D102
if np.isnan(z1p0):
return 0.0
z1ref = self.calcZ1ref(vs30)
- vsCoeff = np.array([self.a43, self.a44, self.a45, self.a46, self.a46])
- VS_BINS = np.array([150.0, 250.0, 400.0, 700.0, 1000.0])
+ vsCoeff = np.array([self.a43, self.a44, self.a45, self.a46, self.a46]) # noqa: N806
+ VS_BINS = np.array([150.0, 250.0, 400.0, 700.0, 1000.0]) # noqa: N806
z1c = np.interp(vs30, VS_BINS, vsCoeff)
return z1c * np.log((z1p0 + 0.01) / (z1ref + 0.01))
- def getPhiA(self, Mw, s1, s2):
- if Mw < 4.0:
+ def getPhiA(self, Mw, s1, s2): # noqa: N802, N803, D102
+ if Mw < 4.0: # noqa: PLR2004
return s1
- if Mw > 6.0:
+ if Mw > 6.0: # noqa: PLR2004
return s2
- else:
+ else: # noqa: RET505
return s1 + ((s2 - s1) / 2) * (Mw - 4.0)
- def getTauA(self, Mw, s3, s4):
- if Mw < 5.0:
+ def getTauA(self, Mw, s3, s4): # noqa: N802, N803, D102
+ if Mw < 5.0: # noqa: PLR2004
return s3
- if Mw > 7.0:
+ if Mw > 7.0: # noqa: PLR2004
return s4
return s3 + ((s4 - s3) / 2) * (Mw - 5.0)
- def get_dAmp(self, b, c, vLin, vs30, saRock):
+ def get_dAmp(self, b, c, vLin, vs30, saRock): # noqa: N802, N803, D102
if vs30 >= vLin:
return 0.0
return (-b * saRock) / (saRock + c) + (b * saRock) / (
saRock + c * np.power(vs30 / vLin, self.N)
)
- def calcValues(
+ def calcValues( # noqa: C901, N802, D102
self,
- Mw,
- rJB,
- rRup,
- rX,
- rY0,
+ Mw, # noqa: N803
+ rJB, # noqa: N803
+ rRup, # noqa: N803
+ rX, # noqa: N803
+ rY0, # noqa: ARG002, N803
dip,
width,
- zTop,
+ zTop, # noqa: N803
vs30,
- vsInferred,
+ vsInferred, # noqa: N803
z1p0,
style,
):
- if Mw > 5:
+ if Mw > 5: # noqa: PLR2004
c4mag = self.C4
- elif Mw > 4:
+ elif Mw > 4: # noqa: PLR2004
c4mag = self.C4 - (self.C4 - 1.0) * (5.0 - Mw)
else:
c4mag = 1.0
# -- Equation 3
- R = np.sqrt(rRup * rRup + c4mag * c4mag)
+ R = np.sqrt(rRup * rRup + c4mag * c4mag) # noqa: N806
# -- Equation 2
- MaxMwSq = (8.5 - Mw) * (8.5 - Mw)
- MwM1 = Mw - self.M1
+ MaxMwSq = (8.5 - Mw) * (8.5 - Mw) # noqa: N806
+ MwM1 = Mw - self.M1 # noqa: N806
f1 = self.a1 + self.a17 * rRup
if Mw > self.M1:
@@ -457,9 +457,9 @@ def calcValues(
+ (self.a2 + self.A3 * MwM1) * np.log(R)
)
else:
- M2M1 = self.M2 - self.M1
- MaxM2Sq = (8.5 - self.M2) * (8.5 - self.M2)
- MwM2 = Mw - self.M2
+ M2M1 = self.M2 - self.M1 # noqa: N806
+ MaxM2Sq = (8.5 - self.M2) * (8.5 - self.M2) # noqa: N806
+ MwM2 = Mw - self.M2 # noqa: N806
f1 += (
self.A4 * M2M1
+ self.a8 * MaxM2Sq
@@ -469,32 +469,32 @@ def calcValues(
# Hanging Wall Model
f4 = 0.0
- if rJB < 30 and rX >= 0.0 and Mw > 5.5 and zTop <= 10.0:
- T1 = (90.0 - dip) / 45 if (dip > 30.0) else 1.33333333
- dM = Mw - 6.5
- T2 = (
+ if rJB < 30 and rX >= 0.0 and Mw > 5.5 and zTop <= 10.0: # noqa: PLR2004
+ T1 = (90.0 - dip) / 45 if (dip > 30.0) else 1.33333333 # noqa: N806, PLR2004
+ dM = Mw - 6.5 # noqa: N806
+ T2 = ( # noqa: N806
1 + self.A2_HW * dM
- if Mw >= 6.5
+ if Mw >= 6.5 # noqa: PLR2004
else 1 + self.A2_HW * dM - (1 - self.A2_HW) * dM * dM
)
- T3 = 0.0
+ T3 = 0.0 # noqa: N806
r1 = width * np.cos(dip * np.pi / 180.0)
r2 = 3 * r1
if rX <= r1:
- rXr1 = rX / r1
- T3 = self.H1 + self.H2 * rXr1 + self.H3 * rXr1 * rXr1
+ rXr1 = rX / r1 # noqa: N806
+ T3 = self.H1 + self.H2 * rXr1 + self.H3 * rXr1 * rXr1 # noqa: N806
elif rX <= r2:
- T3 = 1 - (rX - r1) / (r2 - r1)
- T4 = 1 - (zTop * zTop) / 100.0
- T5 = 1.0 if rJB == 0.0 else 1 - rJB / 30.0
+ T3 = 1 - (rX - r1) / (r2 - r1) # noqa: N806
+ T4 = 1 - (zTop * zTop) / 100.0 # noqa: N806
+ T5 = 1.0 if rJB == 0.0 else 1 - rJB / 30.0 # noqa: N806
f4 = self.a13 * T1 * T2 * T3 * T4 * T5
f6 = self.a15
- if zTop < 20.0:
+ if zTop < 20.0: # noqa: PLR2004
f6 *= zTop / 20.0
if style == 'NORMAL':
- if Mw > 5.0:
+ if Mw > 5.0: # noqa: PLR2004
f78 = self.a12
- elif Mw >= 4.0:
+ elif Mw >= 4.0: # noqa: PLR2004
f78 = self.a12 * (Mw - 4)
else:
f78 = 0.0
@@ -509,14 +509,14 @@ def calcValues(
vs30s = min(v1, vs30) # -- Equation 8
# Site term -- Equation 7
- saRock = 0.0 # calc Sa1180 (rock reference) if necessary
+ saRock = 0.0 # calc Sa1180 (rock reference) if necessary # noqa: N806
if vs30 < self.Vlin:
if v1 > self.VS_RK:
vs30s_rk = self.VS_RK
else:
vs30s_rk = v1
f5_rk = (self.a10 + self.b * self.N) * np.log(vs30s_rk / self.Vlin)
- saRock = np.exp(f1 + f78 + f5_rk + f4 + f6)
+ saRock = np.exp(f1 + f78 + f5_rk + f4 + f6) # noqa: N806
f5 = (
self.a10 * np.log(vs30s / self.Vlin)
- self.b * np.log(saRock + self.c)
@@ -530,55 +530,55 @@ def calcValues(
# ****** Aleatory uncertainty model ******
# Intra-event term -- Equation 24
if vsInferred:
- phiAsq = self.getPhiA(Mw, self.s1e, self.s2e)
+ phiAsq = self.getPhiA(Mw, self.s1e, self.s2e) # noqa: N806
else:
- phiAsq = self.getPhiA(Mw, self.s1m, self.s2m)
- phiAsq *= phiAsq
+ phiAsq = self.getPhiA(Mw, self.s1m, self.s2m) # noqa: N806
+ phiAsq *= phiAsq # noqa: N806
# Inter-event term -- Equation 25
- tauB = self.getTauA(Mw, self.s3, self.s4)
+ tauB = self.getTauA(Mw, self.s3, self.s4) # noqa: N806
# Intra-event term with site amp variability removed -- Equation 27
- phiBsq = phiAsq - self.PHI_AMP_SQ
+ phiBsq = phiAsq - self.PHI_AMP_SQ # noqa: N806
# Partial deriv. of ln(soil amp) w.r.t. ln(SA1180) -- Equation 30
# saRock subject to same vs30 < Vlin test as in mean model
- dAmp_p1 = self.get_dAmp(self.b, self.c, self.Vlin, vs30, saRock) + 1.0
+ dAmp_p1 = self.get_dAmp(self.b, self.c, self.Vlin, vs30, saRock) + 1.0 # noqa: N806
# phi squared, with non-linear effects -- Equation 28
- phiSq = phiBsq * dAmp_p1 * dAmp_p1 + self.PHI_AMP_SQ
+ phiSq = phiBsq * dAmp_p1 * dAmp_p1 + self.PHI_AMP_SQ # noqa: N806
# tau squared, with non-linear effects -- Equation 29
tau = tauB * dAmp_p1
# total std dev
- stdDev = np.sqrt(phiSq + tau * tau)
+ stdDev = np.sqrt(phiSq + tau * tau) # noqa: N806
return mean, stdDev, np.sqrt(phiSq), tau
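For readers tracing the aleatory model above: the total standard deviation is the usual within-event/between-event combination, with both terms scaled by the nonlinear site-amplification derivative. A minimal standalone sketch of that combination, with invented inputs (the real phiBsq, tauB, and dAmp_p1 come from the equations above):

import numpy as np

def ask14_total_sigma(phi_b_sq, tau_b, d_amp_p1, phi_amp_sq):
    # Pattern of Equations 27-29 above: scale both terms by the
    # nonlinear site-amplification derivative, then combine.
    phi_sq = phi_b_sq * d_amp_p1 * d_amp_p1 + phi_amp_sq  # within-event
    tau = tau_b * d_amp_p1                                # between-event
    return np.sqrt(phi_sq + tau * tau)

sigma = ask14_total_sigma(0.30, 0.40, 0.95, 0.16)  # invented inputs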
- def getFaultFromRake(self, rake):
- if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45):
+ def getFaultFromRake(self, rake): # noqa: N802, D102
+ if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45): # noqa: PLR2004
return 'STRIKE_SLIP'
- elif rake >= 45 and rake <= 135:
+ elif rake >= 45 and rake <= 135: # noqa: RET505, PLR2004
return 'REVERSE'
else:
return 'NORMAL'
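The rake-to-style mapping just above follows the standard convention (rake in degrees on [-180, 180]; strike-slip near 0 or +/-180, reverse near +90, normal near -90). A quick standalone check of the same logic:

def fault_style_from_rake(rake):
    # Same branching as getFaultFromRake above, as a free function.
    if rake >= 135 or rake <= -135 or -45 <= rake <= 45:
        return 'STRIKE_SLIP'
    if 45 <= rake <= 135:
        return 'REVERSE'
    return 'NORMAL'

assert fault_style_from_rake(0) == 'STRIKE_SLIP'
assert fault_style_from_rake(90) == 'REVERSE'
assert fault_style_from_rake(-90) == 'NORMAL'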
- def get_IM(self, Mw, site_rup_dict, site_info, im_info):
- vsInf = bool(site_info['vsInferred'])
+ def get_IM(self, Mw, site_rup_dict, site_info, im_info): # noqa: N802, N803, D102
+ vsInf = bool(site_info['vsInferred']) # noqa: N806
style = self.getFaultFromRake(site_rup_dict['aveRake'])
if 'SA' in im_info['Type']:
- cur_T = im_info.get('Periods', None)
+ cur_T = im_info.get('Periods', None) # noqa: N806
elif im_info['Type'] == 'PGA':
- cur_T = ['PGA']
+ cur_T = ['PGA'] # noqa: N806
elif im_info['Type'] == 'PGV':
- cur_T = ['PGV']
+ cur_T = ['PGV'] # noqa: N806
else:
- print(f'The IM type {im_info["Type"]} is not supported')
- meanList = []
- stdDevList = []
- InterEvStdDevList = []
- IntraEvStdDevList = []
- for Tj in cur_T:
+ print(f'The IM type {im_info["Type"]} is not supported') # noqa: T201
+ meanList = [] # noqa: N806
+ stdDevList = [] # noqa: N806
+ InterEvStdDevList = [] # noqa: N806
+ IntraEvStdDevList = [] # noqa: N806
+ for Tj in cur_T: # noqa: N806
start = time.process_time_ns()
self.setIMT(Tj)
self.timeSetImt += time.process_time_ns() - start
start = time.process_time_ns()
- mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calcValues(
+ mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calcValues( # noqa: N806
Mw,
site_info['rJB'],
site_info['rRup'],
@@ -597,24 +597,24 @@ def get_IM(self, Mw, site_rup_dict, site_info, im_info):
stdDevList.append(stdDev)
InterEvStdDevList.append(InterEvStdDev)
IntraEvStdDevList.append(IntraEvStdDev)
- saResult = {
+ saResult = { # noqa: N806
'Mean': meanList,
'TotalStdDev': stdDevList,
'InterEvStdDev': InterEvStdDevList,
'IntraEvStdDev': IntraEvStdDevList,
}
- return saResult
+ return saResult # noqa: RET504
# Boore, Stewart, Seyhan, Atkinson (2014)
-class boore_etal_2014:
- timeSetImt = 0
- timeCalc = 0
- supportedImt = None
+class boore_etal_2014: # noqa: D101
+ timeSetImt = 0 # noqa: N815
+ timeCalc = 0 # noqa: N815
+ supportedImt = None # noqa: N815
def __init__(self):
self.coeff = pd.read_csv(
- os.path.join(os.path.dirname(__file__), 'data', 'BSSA14.csv')
+ os.path.join(os.path.dirname(__file__), 'data', 'BSSA14.csv') # noqa: PTH118, PTH120
)
self.coeff.iloc[:-2, 0] = self.coeff.iloc[:-2, 0].apply(lambda x: float(x))
self.coeff = self.coeff.set_index('T')
@@ -634,7 +634,7 @@ def __init__(self):
self.V2 = 300
self.imt = 'PGA'
- def setIMT(self, imt):
+ def setIMT(self, imt): # noqa: N802, D102
if imt not in self.supportedImt:
sys.exit(
f'The imt {imt} is not supported by Boore, Stewart, Seyhan & Atkinson (2014)'
@@ -668,109 +668,109 @@ def setIMT(self, imt):
self.tau1 = self.coeff['tau1'][imt]
self.tau2 = self.coeff['tau2'][imt]
- def getFaultFromRake(self, rake):
- if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45):
+ def getFaultFromRake(self, rake): # noqa: N802, D102
+ if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45): # noqa: PLR2004
return 'STRIKE_SLIP'
- elif rake >= 45 and rake <= 135:
+ elif rake >= 45 and rake <= 135: # noqa: RET505, PLR2004
return 'REVERSE'
else:
return 'NORMAL'
- def calcSourceTerm(self, Mw, style):
+ def calcSourceTerm(self, Mw, style): # noqa: N802, N803, D102
if style == 'STRIKE_SLIP':
- Fe = self.e1
+ Fe = self.e1 # noqa: N806
elif style == 'REVERSE':
- Fe = self.e3
+ Fe = self.e3 # noqa: N806
elif style == 'NORMAL':
- Fe = self.e2
+ Fe = self.e2 # noqa: N806
else:
- Fe = self.e0
- MwMh = Mw - self.Mh
+ Fe = self.e0 # noqa: N806
+ MwMh = Mw - self.Mh # noqa: N806
if Mw <= self.Mh:
- Fe = Fe + self.e4 * MwMh + self.e5 * MwMh * MwMh
+ Fe = Fe + self.e4 * MwMh + self.e5 * MwMh * MwMh # noqa: N806
else:
- Fe = Fe + self.e6 * MwMh
+ Fe = Fe + self.e6 * MwMh # noqa: N806
return Fe
- def calcPathTerm(self, Mw, R):
+ def calcPathTerm(self, Mw, R): # noqa: N802, N803, D102
return (self.c1 + self.c2 * (Mw - self.M_REF)) * np.log(R / self.R_REF) + (
self.c3 + self.DC3_CA_TW
) * (R - self.R_REF)
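calcPathTerm above implements the BSSA14 geometric-spreading-plus-anelastic form, Fp = (c1 + c2(Mw - Mref)) ln(R/Rref) + (c3 + dc3)(R - Rref), with R = sqrt(rJB^2 + h^2). A hedged sketch with placeholder coefficients (the published values are read from BSSA14.csv and are not shown in this hunk):

import numpy as np

def bssa14_path_term(mw, r_jb, h, c1, c2, c3, m_ref=4.5, r_ref=1.0, dc3=0.0):
    r = np.sqrt(r_jb * r_jb + h * h)  # effective distance, as in calcPGArock
    return (c1 + c2 * (mw - m_ref)) * np.log(r / r_ref) + (c3 + dc3) * (r - r_ref)

fp = bssa14_path_term(mw=6.5, r_jb=20.0, h=5.0, c1=-1.13, c2=0.19, c3=-0.008)  # placeholder coefficients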
- def calcPGArock(self, Mw, rJB, style):
- FePGA = self.calcSourceTerm(Mw, style)
- R = np.sqrt(rJB * rJB + self.h * self.h)
- FpPGA = self.calcPathTerm(Mw, R)
+ def calcPGArock(self, Mw, rJB, style): # noqa: N802, N803, D102
+ FePGA = self.calcSourceTerm(Mw, style) # noqa: N806
+ R = np.sqrt(rJB * rJB + self.h * self.h) # noqa: N806
+ FpPGA = self.calcPathTerm(Mw, R) # noqa: N806
return np.exp(FePGA + FpPGA)
- def calcLnFlin(self, vs30):
- vsLin = min(vs30, self.Vc)
- lnFlin = self.c * np.log(vsLin / self.V_REF)
- return lnFlin
+ def calcLnFlin(self, vs30): # noqa: N802, D102
+ vsLin = min(vs30, self.Vc) # noqa: N806
+ lnFlin = self.c * np.log(vsLin / self.V_REF) # noqa: N806
+ return lnFlin # noqa: RET504
- def calcF2(self, vs30):
+ def calcF2(self, vs30): # noqa: N802, D102
f2 = self.f4 * (
np.exp(self.f5 * (min(vs30, 760.0) - 360.0))
- np.exp(self.f5 * (760.0 - 360.0))
)
- return f2
+ return f2 # noqa: RET504
- def calcFdz1(self, vs30, z1p0):
- DZ1 = self.calcDeltaZ1(z1p0, vs30)
- if self.imt != 'PGA' and self.imt != 'PGV' and self.imt >= 0.65:
+ def calcFdz1(self, vs30, z1p0): # noqa: N802, D102
+ DZ1 = self.calcDeltaZ1(z1p0, vs30) # noqa: N806
+ if self.imt != 'PGA' and self.imt != 'PGV' and self.imt >= 0.65: # noqa: PLR1714, PLR2004
if (self.f7 / self.f6) >= DZ1:
- Fdz1 = self.f6 * DZ1
+ Fdz1 = self.f6 * DZ1 # noqa: N806
else:
- Fdz1 = self.f7
+ Fdz1 = self.f7 # noqa: N806
else:
- Fdz1 = 0.0
+ Fdz1 = 0.0 # noqa: N806
return Fdz1
- def calcDeltaZ1(self, z1p0, vs30):
+ def calcDeltaZ1(self, z1p0, vs30): # noqa: N802, D102
if np.isnan(z1p0):
return 0.0
return z1p0 - self.calcZ1ref(vs30)
- def calcZ1ref(self, vs30):
- vsPow4 = np.power(vs30, 4)
+ def calcZ1ref(self, vs30): # noqa: N802, D102
+ vsPow4 = np.power(vs30, 4) # noqa: N806
return np.exp(-7.15 / 4.0 * np.log((vsPow4 + self.A) / self.B)) / 1000.0
- def calcMean(self, Mw, rJB, vs30, z1p0, style, pgaRock):
- Fe = self.calcSourceTerm(Mw, style)
- R = np.sqrt(rJB * rJB + self.h * self.h)
- Fp = self.calcPathTerm(Mw, R)
- lnFlin = self.calcLnFlin(vs30)
+ def calcMean(self, Mw, rJB, vs30, z1p0, style, pgaRock): # noqa: N802, N803, D102
+ Fe = self.calcSourceTerm(Mw, style) # noqa: N806
+ R = np.sqrt(rJB * rJB + self.h * self.h) # noqa: N806
+ Fp = self.calcPathTerm(Mw, R) # noqa: N806
+ lnFlin = self.calcLnFlin(vs30) # noqa: N806
f2 = self.calcF2(vs30)
- lnFnl = self.F1 + f2 * np.log((pgaRock + self.F3) / self.F3)
- Fdz1 = self.calcFdz1(vs30, z1p0)
- Fs = lnFlin + lnFnl + Fdz1
+ lnFnl = self.F1 + f2 * np.log((pgaRock + self.F3) / self.F3) # noqa: N806
+ Fdz1 = self.calcFdz1(vs30, z1p0) # noqa: N806
+ Fs = lnFlin + lnFnl + Fdz1 # noqa: N806
return Fe + Fp + Fs
- def calcPhi(self, Mw, rJB, vs30):
- if Mw >= 5.5:
- phiM = self.phi2
- elif Mw <= 4.5:
- phiM = self.phi1
+ def calcPhi(self, Mw, rJB, vs30): # noqa: N802, N803, D102
+ if Mw >= 5.5: # noqa: PLR2004
+ phiM = self.phi2 # noqa: N806
+ elif Mw <= 4.5: # noqa: PLR2004
+ phiM = self.phi1 # noqa: N806
else:
- phiM = self.phi1 + (self.phi2 - self.phi1) * (Mw - 4.5)
- phiMR = phiM
+ phiM = self.phi1 + (self.phi2 - self.phi1) * (Mw - 4.5) # noqa: N806
+ phiMR = phiM # noqa: N806
if rJB > self.R2:
- phiMR += self.dPhiR
+ phiMR += self.dPhiR # noqa: N806
elif rJB > self.R1:
- phiMR += self.dPhiR * (np.log(rJB / self.R1) / np.log(self.R2 / self.R1))
- phiMRV = phiMR
+ phiMR += self.dPhiR * (np.log(rJB / self.R1) / np.log(self.R2 / self.R1)) # noqa: N806
+ phiMRV = phiMR # noqa: N806
if vs30 <= self.V1:
- phiMRV -= self.dPhiV
+ phiMRV -= self.dPhiV # noqa: N806
elif vs30 < self.V2:
- phiMRV -= self.dPhiV * (
+ phiMRV -= self.dPhiV * ( # noqa: N806
np.log(self.V2 / vs30) / np.log(self.V2 / self.V1)
)
return phiMRV
- def calcTau(self, Mw):
- if Mw >= 5.5:
+ def calcTau(self, Mw): # noqa: N802, N803, D102
+ if Mw >= 5.5: # noqa: PLR2004
tau = self.tau2
- elif Mw <= 4.5:
+ elif Mw <= 4.5: # noqa: PLR2004
tau = self.tau1
else:
tau = self.tau1 + (self.tau2 - self.tau1) * (Mw - 4.5)
@@ -780,41 +780,41 @@ def calcTau(self, Mw):
# tau = self.calcTau(Mw)
# phiMRV = self.calcPhi(Mw, rJB, vs30)
# return np.sqrt(phiMRV * phiMRV + tau * tau)
- def calcStdDev(self, phiMRV, tau):
+ def calcStdDev(self, phiMRV, tau): # noqa: N802, N803, D102
return np.sqrt(phiMRV * phiMRV + tau * tau)
- def calc(self, Mw, rJB, vs30, z1p0, style):
+ def calc(self, Mw, rJB, vs30, z1p0, style): # noqa: N803, D102
imt_tmp = self.imt
self.setIMT('PGA')
- pgaRock = self.calcPGArock(Mw, rJB, style)
+ pgaRock = self.calcPGArock(Mw, rJB, style) # noqa: N806
self.setIMT(imt_tmp)
mean = self.calcMean(Mw, rJB, vs30, z1p0, style, pgaRock)
phi = self.calcPhi(Mw, rJB, vs30)
tau = self.calcTau(Mw)
- stdDev = self.calcStdDev(phi, tau)
+ stdDev = self.calcStdDev(phi, tau) # noqa: N806
return mean, stdDev, tau, phi
- def get_IM(self, Mw, site_rup_dict, site_info, im_info):
- vsInf = bool(site_info['vsInferred'])
+ def get_IM(self, Mw, site_rup_dict, site_info, im_info): # noqa: N802, N803, D102
+ vsInf = bool(site_info['vsInferred']) # noqa: N806, F841
style = self.getFaultFromRake(site_rup_dict['aveRake'])
if 'SA' in im_info['Type']:
- cur_T = im_info.get('Periods', None)
+ cur_T = im_info.get('Periods', None) # noqa: N806
elif im_info['Type'] == 'PGA':
- cur_T = ['PGA']
+ cur_T = ['PGA'] # noqa: N806
elif im_info['Type'] == 'PGV':
- cur_T = ['PGV']
+ cur_T = ['PGV'] # noqa: N806
else:
- print(f'The IM type {im_info["Type"]} is not supported')
- meanList = []
- stdDevList = []
- InterEvStdDevList = []
- IntraEvStdDevList = []
- for Tj in cur_T:
+ print(f'The IM type {im_info["Type"]} is not supported') # noqa: T201
+ meanList = [] # noqa: N806
+ stdDevList = [] # noqa: N806
+ InterEvStdDevList = [] # noqa: N806
+ IntraEvStdDevList = [] # noqa: N806
+ for Tj in cur_T: # noqa: N806
start = time.process_time_ns()
self.setIMT(Tj)
self.timeSetImt += time.process_time_ns() - start
start = time.process_time_ns()
- mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc(
+ mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc( # noqa: N806
Mw,
site_info['rJB'],
site_info['vs30'],
@@ -826,24 +826,24 @@ def get_IM(self, Mw, site_rup_dict, site_info, im_info):
stdDevList.append(stdDev)
InterEvStdDevList.append(InterEvStdDev)
IntraEvStdDevList.append(IntraEvStdDev)
- saResult = {
+ saResult = { # noqa: N806
'Mean': meanList,
'TotalStdDev': stdDevList,
'InterEvStdDev': InterEvStdDevList,
'IntraEvStdDev': IntraEvStdDevList,
}
- return saResult
+ return saResult # noqa: RET504
# Campbell & Bozorgnia (2014)
-class campbell_bozorgnia_2014:
- timeSetImt = 0
- timeCalc = 0
- supportedImt = None
+class campbell_bozorgnia_2014: # noqa: D101
+ timeSetImt = 0 # noqa: N815
+ timeCalc = 0 # noqa: N815
+ supportedImt = None # noqa: N815
def __init__(self):
self.coeff = pd.read_csv(
- os.path.join(os.path.dirname(__file__), 'data', 'CB14.csv')
+ os.path.join(os.path.dirname(__file__), 'data', 'CB14.csv') # noqa: PTH118, PTH120
)
self.coeff.iloc[:-2, 0] = self.coeff.iloc[:-2, 0].apply(lambda x: float(x))
self.coeff = self.coeff.set_index('T')
@@ -861,7 +861,7 @@ def __init__(self):
self.phi_hi_PGA = self.coeff['phi2']['PGA']
self.phi_lo_PGA = self.coeff['phi1']['PGA']
- def setIMT(self, imt):
+ def setIMT(self, imt): # noqa: N802, D102
if imt not in self.supportedImt:
sys.exit(
f'The imt {imt} is not supported by Campbell & Bozorgnia (2014)'
@@ -904,109 +904,109 @@ def setIMT(self, imt):
self.tau2 = self.coeff['tau2'][imt]
self.rho = self.coeff['rho'][imt]
- def getFaultFromRake(self, rake):
- if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45):
+ def getFaultFromRake(self, rake): # noqa: N802, D102
+ if rake >= 135 or rake <= -135 or (rake >= -45 and rake <= 45): # noqa: PLR2004
return 'STRIKE_SLIP'
- elif rake >= 45 and rake <= 135:
+ elif rake >= 45 and rake <= 135: # noqa: RET505, PLR2004
return 'REVERSE'
else:
return 'NORMAL'
- def calcZ25ref(self, vs30):
+ def calcZ25ref(self, vs30): # noqa: N802, D102
return np.exp(7.089 - 1.144 * np.log(vs30))
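calcZ25ref above is the CB14 closed form for the reference sediment depth, z2.5 = exp(7.089 - 1.144 ln(vs30)), in km. A quick numeric check:

import numpy as np

def calc_z25_ref(vs30):
    # Same expression as calcZ25ref above; result in km.
    return np.exp(7.089 - 1.144 * np.log(vs30))

print(round(calc_z25_ref(760.0), 3))  # -> 0.607 km for vs30 = 760 m/s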
- def calcMean(
+ def calcMean( # noqa: C901, N802, D102
self,
- Mw,
- rJB,
- rRup,
- rX,
+ Mw, # noqa: N803
+ rJB, # noqa: N803
+ rRup, # noqa: N803
+ rX, # noqa: N803
dip,
width,
- zTop,
- zHyp,
+ zTop, # noqa: N803
+ zHyp, # noqa: N803
vs30,
z2p5,
style,
- pgaRock,
+ pgaRock, # noqa: N803
):
- Fmag = self.c0 + self.c1 * Mw
- if Mw > 6.5:
- Fmag += (
+ Fmag = self.c0 + self.c1 * Mw # noqa: N806
+ if Mw > 6.5: # noqa: PLR2004
+ Fmag += ( # noqa: N806
self.c2 * (Mw - 4.5) + self.c3 * (Mw - 5.5) + self.c4 * (Mw - 6.5)
)
- elif Mw > 5.5:
- Fmag += self.c2 * (Mw - 4.5) + self.c3 * (Mw - 5.5)
- elif Mw > 4.5:
- Fmag += self.c2 * (Mw - 4.5)
+ elif Mw > 5.5: # noqa: PLR2004
+ Fmag += self.c2 * (Mw - 4.5) + self.c3 * (Mw - 5.5) # noqa: N806
+ elif Mw > 4.5: # noqa: PLR2004
+ Fmag += self.c2 * (Mw - 4.5) # noqa: N806
r = np.sqrt(rRup * rRup + self.c7 * self.c7)
- Fr = (self.c5 + self.c6 * Mw) * np.log(r)
- Fflt = 0.0
- if style == 'NORMAL' and Mw > 4.5:
- Fflt = self.c9
- if Mw <= 5.5:
- Fflt *= Mw - 4.5
- Fhw = 0.0
- if rX >= 0.0 and Mw > 5.5 and zTop <= 16.66:
+ Fr = (self.c5 + self.c6 * Mw) * np.log(r) # noqa: N806
+ Fflt = 0.0 # noqa: N806
+ if style == 'NORMAL' and Mw > 4.5: # noqa: PLR2004
+ Fflt = self.c9 # noqa: N806
+ if Mw <= 5.5: # noqa: PLR2004
+ Fflt *= Mw - 4.5 # noqa: N806
+ Fhw = 0.0 # noqa: N806
+ if rX >= 0.0 and Mw > 5.5 and zTop <= 16.66: # noqa: PLR2004
r1 = width * np.cos(np.radians(dip))
r2 = 62.0 * Mw - 350.0
- rXr1 = rX / r1
- rXr2r1 = (rX - r1) / (r2 - r1)
- f1_rX = self.h1 + self.h2 * rXr1 + self.h3 * (rXr1 * rXr1)
- f2_rX = self.H4 + self.h5 * (rXr2r1) + self.h6 * rXr2r1 * rXr2r1
- Fhw_rX = max(f2_rX, 0.0) if (rX >= r1) else f1_rX
- Fhw_rRup = 1.0 if (rRup == 0.0) else (rRup - rJB) / rRup
- Fhw_m = 1.0 + self.a2 * (Mw - 6.5)
- if Mw <= 6.5:
- Fhw_m *= Mw - 5.5
- Fhw_z = 1.0 - 0.06 * zTop
- Fhw_d = (90.0 - dip) / 45.0
- Fhw = self.c10 * Fhw_rX * Fhw_rRup * Fhw_m * Fhw_z * Fhw_d
+ rXr1 = rX / r1 # noqa: N806
+ rXr2r1 = (rX - r1) / (r2 - r1) # noqa: N806
+ f1_rX = self.h1 + self.h2 * rXr1 + self.h3 * (rXr1 * rXr1) # noqa: N806
+ f2_rX = self.H4 + self.h5 * (rXr2r1) + self.h6 * rXr2r1 * rXr2r1 # noqa: N806
+ Fhw_rX = max(f2_rX, 0.0) if (rX >= r1) else f1_rX # noqa: N806
+ Fhw_rRup = 1.0 if (rRup == 0.0) else (rRup - rJB) / rRup # noqa: N806
+ Fhw_m = 1.0 + self.a2 * (Mw - 6.5) # noqa: N806
+ if Mw <= 6.5: # noqa: PLR2004
+ Fhw_m *= Mw - 5.5 # noqa: N806
+ Fhw_z = 1.0 - 0.06 * zTop # noqa: N806
+ Fhw_d = (90.0 - dip) / 45.0 # noqa: N806
+ Fhw = self.c10 * Fhw_rX * Fhw_rRup * Fhw_m * Fhw_z * Fhw_d # noqa: N806
vsk1 = vs30 / self.k1
if vs30 <= self.k1:
- Fsite = self.c11 * np.log(vsk1) + self.k2 * (
+ Fsite = self.c11 * np.log(vsk1) + self.k2 * ( # noqa: N806
np.log(pgaRock + self.C * np.power(vsk1, self.N))
- np.log(pgaRock + self.C)
)
else:
- Fsite = (self.c11 + self.k2 * self.N) * np.log(vsk1)
+ Fsite = (self.c11 + self.k2 * self.N) * np.log(vsk1) # noqa: N806
if np.isnan(z2p5):
z2p5 = self.calcZ25ref(vs30)
- Fsed = 0.0
+ Fsed = 0.0 # noqa: N806
if z2p5 <= 1.0:
- Fsed = self.c14 * (z2p5 - 1.0)
- elif z2p5 > 3.0:
- Fsed = (
+ Fsed = self.c14 * (z2p5 - 1.0) # noqa: N806
+ elif z2p5 > 3.0: # noqa: PLR2004
+ Fsed = ( # noqa: N806
self.c16
* self.k3
* np.exp(-0.75)
* (1.0 - np.exp(-0.25 * (z2p5 - 3.0)))
)
- if zHyp <= 7.0:
- Fhyp = 0.0
- elif zHyp <= 20.0:
- Fhyp = zHyp - 7.0
+ if zHyp <= 7.0: # noqa: PLR2004
+ Fhyp = 0.0 # noqa: N806
+ elif zHyp <= 20.0: # noqa: PLR2004
+ Fhyp = zHyp - 7.0 # noqa: N806
else:
- Fhyp = 13.0
- if Mw <= 5.5:
- Fhyp *= self.c17
- elif Mw <= 6.5:
- Fhyp *= self.c17 + (self.c18 - self.c17) * (Mw - 5.5)
+ Fhyp = 13.0 # noqa: N806
+ if Mw <= 5.5: # noqa: PLR2004
+ Fhyp *= self.c17 # noqa: N806
+ elif Mw <= 6.5: # noqa: PLR2004
+ Fhyp *= self.c17 + (self.c18 - self.c17) * (Mw - 5.5) # noqa: N806
else:
- Fhyp *= self.c18
- if Mw > 5.5:
- Fdip = 0.0
- elif Mw > 4.5:
- Fdip = self.c19 * (5.5 - Mw) * dip
+ Fhyp *= self.c18 # noqa: N806
+ if Mw > 5.5: # noqa: PLR2004
+ Fdip = 0.0 # noqa: N806
+ elif Mw > 4.5: # noqa: PLR2004
+ Fdip = self.c19 * (5.5 - Mw) * dip # noqa: N806
else:
- Fdip = self.c19 * dip
- if rRup > 80.0:
- Fatn = self.c20 * (rRup - 80.0)
+ Fdip = self.c19 * dip # noqa: N806
+ if rRup > 80.0: # noqa: PLR2004
+ Fatn = self.c20 * (rRup - 80.0) # noqa: N806
else:
- Fatn = 0.0
+ Fatn = 0.0 # noqa: N806
return Fmag + Fr + Fflt + Fhw + Fsite + Fsed + Fhyp + Fdip + Fatn
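The CB14 median above is a ln-space sum of nine terms (magnitude, geometric spreading, fault style, hanging wall, shallow site, sediment/basin, hypocentral depth, dip, and anelastic attenuation); exponentiating the sum recovers the median intensity. A trivial sketch of that final step, with invented term values:

import numpy as np

terms = {'Fmag': 0.8, 'Fr': -2.1, 'Fflt': 0.0, 'Fhw': 0.0, 'Fsite': -0.3,
         'Fsed': 0.0, 'Fhyp': 0.0, 'Fdip': 0.0, 'Fatn': 0.0}  # invented values
median = np.exp(sum(terms.values()))  # median intensity in linear units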
- def calcAlpha(self, vs30, pgaRock):
+ def calcAlpha(self, vs30, pgaRock): # noqa: N802, N803, D102
vsk1 = vs30 / self.k1
if vs30 < self.k1:
alpha = (
@@ -1021,52 +1021,52 @@ def calcAlpha(self, vs30, pgaRock):
alpha = 0.0
return alpha
- def stdMagDep(self, lo, hi, Mw):
+ def stdMagDep(self, lo, hi, Mw): # noqa: N802, N803, D102
return hi + (lo - hi) * (5.5 - Mw)
- def calcPhiSq(self, Mw, alpha):
- if Mw <= 4.5:
- phi_lnY = self.phi1
- phi_lnPGAB = self.phi_lo_PGA
- elif Mw < 5.5:
- phi_lnY = self.stdMagDep(self.phi1, self.phi2, Mw)
- phi_lnPGAB = self.stdMagDep(self.phi_lo_PGA, self.phi_hi_PGA, Mw)
+ def calcPhiSq(self, Mw, alpha): # noqa: N802, N803, D102
+ if Mw <= 4.5: # noqa: PLR2004
+ phi_lnY = self.phi1 # noqa: N806
+ phi_lnPGAB = self.phi_lo_PGA # noqa: N806
+ elif Mw < 5.5: # noqa: PLR2004
+ phi_lnY = self.stdMagDep(self.phi1, self.phi2, Mw) # noqa: N806
+ phi_lnPGAB = self.stdMagDep(self.phi_lo_PGA, self.phi_hi_PGA, Mw) # noqa: N806
else:
- phi_lnY = self.phi2
- phi_lnPGAB = self.phi_hi_PGA
- phi_lnYB = np.sqrt(phi_lnY * phi_lnY - self.PHI_LNAF_SQ)
- phi_lnPGAB = np.sqrt(phi_lnPGAB * phi_lnPGAB - self.PHI_LNAF_SQ)
- aPhi_lnPGAB = alpha * phi_lnPGAB
- phiSq = (
+ phi_lnY = self.phi2 # noqa: N806
+ phi_lnPGAB = self.phi_hi_PGA # noqa: N806
+ phi_lnYB = np.sqrt(phi_lnY * phi_lnY - self.PHI_LNAF_SQ) # noqa: N806
+ phi_lnPGAB = np.sqrt(phi_lnPGAB * phi_lnPGAB - self.PHI_LNAF_SQ) # noqa: N806
+ aPhi_lnPGAB = alpha * phi_lnPGAB # noqa: N806
+ phiSq = ( # noqa: N806
phi_lnY * phi_lnY
+ aPhi_lnPGAB * aPhi_lnPGAB
+ 2.0 * self.rho * phi_lnYB * aPhi_lnPGAB
)
- return phiSq
-
- def calcTauSq(self, Mw, alpha):
- if Mw <= 4.5:
- tau_lnYB = self.tau1
- tau_lnPGAB = self.tau_lo_PGA
- elif Mw < 5.5:
- tau_lnYB = self.stdMagDep(self.tau1, self.tau2, Mw)
- tau_lnPGAB = self.stdMagDep(self.tau_lo_PGA, self.tau_hi_PGA, Mw)
+ return phiSq # noqa: RET504
+
+ def calcTauSq(self, Mw, alpha): # noqa: N802, N803, D102
+ if Mw <= 4.5: # noqa: PLR2004
+ tau_lnYB = self.tau1 # noqa: N806
+ tau_lnPGAB = self.tau_lo_PGA # noqa: N806
+ elif Mw < 5.5: # noqa: PLR2004
+ tau_lnYB = self.stdMagDep(self.tau1, self.tau2, Mw) # noqa: N806
+ tau_lnPGAB = self.stdMagDep(self.tau_lo_PGA, self.tau_hi_PGA, Mw) # noqa: N806
else:
- tau_lnYB = self.tau2
- tau_lnPGAB = self.tau_hi_PGA
- alphaTau = alpha * tau_lnPGAB
- tauSq = (
+ tau_lnYB = self.tau2 # noqa: N806
+ tau_lnPGAB = self.tau_hi_PGA # noqa: N806
+ alphaTau = alpha * tau_lnPGAB # noqa: N806
+ tauSq = ( # noqa: N806
tau_lnYB * tau_lnYB
+ alphaTau * alphaTau
+ 2.0 * alpha * self.rho * tau_lnYB * tau_lnPGAB
)
- return tauSq
+ return tauSq # noqa: RET504
- def calc(self, Mw, rJB, rRup, rX, dip, width, zTop, zHyp, vs30, z2p5, style):
+ def calc(self, Mw, rJB, rRup, rX, dip, width, zTop, zHyp, vs30, z2p5, style): # noqa: N803, D102
if vs30 < self.k1:
imt_tmp = self.imt
self.setIMT('PGA')
- pgaRock = np.exp(
+ pgaRock = np.exp( # noqa: N806
self.calcMean(
Mw,
rJB,
@@ -1084,45 +1084,45 @@ def calc(self, Mw, rJB, rRup, rX, dip, width, zTop, zHyp, vs30, z2p5, style):
)
self.setIMT(imt_tmp)
else:
- pgaRock = 0.0
+ pgaRock = 0.0 # noqa: N806
mean = self.calcMean(
Mw, rJB, rRup, rX, dip, width, zTop, zHyp, vs30, z2p5, style, pgaRock
)
- if self.imt != 'PGA' and self.imt != 'PGV' and self.imt <= 0.25:
+ if self.imt != 'PGA' and self.imt != 'PGV' and self.imt <= 0.25: # noqa: PLR1714, PLR2004
imt_tmp = self.imt
self.setIMT('PGA')
- pgaMean = self.calcMean(
+ pgaMean = self.calcMean( # noqa: N806
Mw, rJB, rRup, rX, dip, width, zTop, zHyp, vs30, z2p5, style, pgaRock
)
mean = max(mean, pgaMean)
self.setIMT(imt_tmp)
alpha = self.calcAlpha(vs30, pgaRock)
- phiSq = self.calcPhiSq(Mw, alpha)
- tauSq = self.calcTauSq(Mw, alpha)
- stdDev = np.sqrt(phiSq + tauSq)
+ phiSq = self.calcPhiSq(Mw, alpha) # noqa: N806
+ tauSq = self.calcTauSq(Mw, alpha) # noqa: N806
+ stdDev = np.sqrt(phiSq + tauSq) # noqa: N806
return mean, stdDev, np.sqrt(tauSq), np.sqrt(phiSq)
- def get_IM(self, Mw, site_rup_dict, site_info, im_info):
- vsInf = bool(site_info['vsInferred'])
+ def get_IM(self, Mw, site_rup_dict, site_info, im_info): # noqa: N802, N803, D102
+ vsInf = bool(site_info['vsInferred']) # noqa: N806, F841
style = self.getFaultFromRake(site_rup_dict['aveRake'])
if 'SA' in im_info['Type']:
- cur_T = im_info.get('Periods', None)
+ cur_T = im_info.get('Periods', None) # noqa: N806
elif im_info['Type'] == 'PGA':
- cur_T = ['PGA']
+ cur_T = ['PGA'] # noqa: N806
elif im_info['Type'] == 'PGV':
- cur_T = ['PGV']
+ cur_T = ['PGV'] # noqa: N806
else:
- print(f'The IM type {im_info["Type"]} is not supported')
- meanList = []
- stdDevList = []
- InterEvStdDevList = []
- IntraEvStdDevList = []
- for Tj in cur_T:
+ print(f'The IM type {im_info["Type"]} is not supported') # noqa: T201
+ meanList = [] # noqa: N806
+ stdDevList = [] # noqa: N806
+ InterEvStdDevList = [] # noqa: N806
+ IntraEvStdDevList = [] # noqa: N806
+ for Tj in cur_T: # noqa: N806
start = time.process_time_ns()
self.setIMT(Tj)
self.timeSetImt += time.process_time_ns() - start
start = time.process_time_ns()
- mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc(
+ mean, stdDev, InterEvStdDev, IntraEvStdDev = self.calc( # noqa: N806
Mw,
site_info['rJB'],
site_info['rRup'],
@@ -1140,10 +1140,10 @@ def get_IM(self, Mw, site_rup_dict, site_info, im_info):
stdDevList.append(stdDev)
InterEvStdDevList.append(InterEvStdDev)
IntraEvStdDevList.append(IntraEvStdDev)
- saResult = {
+ saResult = { # noqa: N806
'Mean': meanList,
'TotalStdDev': stdDevList,
'InterEvStdDev': InterEvStdDevList,
'IntraEvStdDev': IntraEvStdDevList,
}
- return saResult
+ return saResult # noqa: RET504
diff --git a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
index 0c6ca4984..82c142387 100644
--- a/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
+++ b/modules/performRegionalEventSimulation/regionalGroundMotion/liquefaction.py
@@ -1,4 +1,4 @@
-import os
+import os # noqa: INP001, D100
import sys
import warnings
from enum import Enum
@@ -6,7 +6,7 @@
import geopandas as gpd
import numpy as np
-import pandas
+import pandas # noqa: ICN001
import rasterio as rio
import shapely
from pyproj import CRS, Transformer
@@ -15,7 +15,7 @@
# Helper functions
-def sampleRaster(
+def sampleRaster( # noqa: N802
raster_file_path,
raster_crs,
x,
@@ -23,8 +23,8 @@ def sampleRaster(
interp_scheme='nearest',
dtype=None,
):
- """Performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'"""
- print(f'Sampling from the Raster File: {os.path.basename(raster_file_path)}...')
+ """Performs 2D interpolation at (x,y) pairs. Accepted interp_scheme = 'nearest', 'linear', 'cubic', and 'quintic'""" # noqa: D400, D401
+ print(f'Sampling from the Raster File: {os.path.basename(raster_file_path)}...') # noqa: T201, PTH119
invalid_value = np.nan
xy_crs = CRS.from_user_input(4326)
raster_crs = CRS.from_user_input(raster_crs)
@@ -32,10 +32,10 @@ def sampleRaster(
try:
raster_data = raster_file.read()
if raster_data.shape[0] > 1:
- warnings.warn(
+ warnings.warn( # noqa: B028
f'More than one band in the file {raster_file_path}, the first band is used.'
)
- except:
+ except: # noqa: E722
sys.exit(f'Can not read data from {raster_file_path}')
if xy_crs != raster_crs:
# make transformer for reprojection
@@ -81,24 +81,28 @@ def sampleRaster(
if dtype is not None:
sample = sample.astype(dtype)
# clean up invalid values (returned as 1e38 by NumPy)
- sample[abs(sample) > 1e10] = invalid_value
+ sample[abs(sample) > 1e10] = invalid_value # noqa: PLR2004
return sample
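A hedged usage sketch for sampleRaster above: x and y are longitude/latitude arrays in EPSG:4326, and the raster path below is invented for illustration.

import numpy as np

lon = np.array([-122.30, -122.25])
lat = np.array([37.87, 37.90])
# Hypothetical call; 'gw_depth.tif' is a made-up file name:
# gw_depth = sampleRaster('gw_depth.tif', raster_crs=4326, x=lon, y=lat,
#                         interp_scheme='linear', dtype=float)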
# Helper functions
-def sampleVector(vector_file_path, vector_crs, x, y, dtype=None):
- """Performs spatial join of vector_file with xy'"""
- print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...')
- invalid_value = np.nan
+def sampleVector(vector_file_path, vector_crs, x, y, dtype=None): # noqa: ARG001, N802
+ """Performs spatial join of vector_file with xy'""" # noqa: D400, D401
+ print(f'Sampling from the Vector File: {os.path.basename(vector_file_path)}...') # noqa: T201, PTH119
+ invalid_value = np.nan # noqa: F841
xy_crs = CRS.from_user_input(4326)
vector_gdf = gpd.read_file(vector_file_path)
try:
user_crs_input = CRS.from_user_input(vector_crs).to_epsg()
if vector_gdf.crs.to_epsg() != user_crs_input:
- sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models")
- except:
- print("The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. The CRS of vector files are used")
+ sys.exit(
+ f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models"
+ )
+ except: # noqa: E722
+ print( # noqa: T201
+ f'The input CRS ({xy_crs}) defined for liquefaction triggering models is invalid. The CRS of the vector file is used instead.'
+ )
# if vector_gdf.crs != vector_crs:
# sys.exit(f"The CRS of vector file {vector_file_path} is {vector_gdf.crs}, and doesn't match the input CRS ({xy_crs}) defined for liquefaction triggering models")
@@ -119,8 +123,8 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None):
vertices = sites[np.append(vertices, vertices[0])]
centroid = np.mean(vertices, axis=0)
vertices = vertices + 0.05 * (vertices - centroid)
- RoI = shapely.geometry.Polygon(vertices)
- except:
+ RoI = shapely.geometry.Polygon(vertices) # noqa: N806
+ except: # noqa: E722
centroid = shapely.geometry.Point(np.mean(x), np.mean(y))
points = [shapely.geometry.Point(x[i], y[i]) for i in range(len(x))]
if len(points) == 1:
@@ -136,8 +140,8 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None):
)
for angle in angles
]
- RoI = shapely.geometry.Polygon(circle_points)
- data = dict()
+ RoI = shapely.geometry.Polygon(circle_points) # noqa: N806
+ data = dict() # noqa: C408
for col in vector_gdf.columns:
data.update({col: []})
for row_index in vector_gdf.index:
@@ -158,14 +162,14 @@ def sampleVector(vector_file_path, vector_crs, x, y, dtype=None):
)
merged = merged.set_index('index_right').sort_index().drop(columns=['geometry'])
gdf_sites = pandas.merge(gdf_sites, merged, on='index', how='left')
- gdf_sites.drop(columns=['geometry', 'index'], inplace=True)
+ gdf_sites.drop(columns=['geometry', 'index'], inplace=True) # noqa: PD002
return gdf_sites
-def find_additional_output_req(liq_info, current_step):
+def find_additional_output_req(liq_info, current_step): # noqa: D103
additional_output_keys = []
if current_step == 'Triggering':
- trigging_parameters = liq_info['Triggering']['Parameters'].keys()
+ trigging_parameters = liq_info['Triggering']['Parameters'].keys() # noqa: F841
triger_dist_water = liq_info['Triggering']['Parameters'].get(
'DistWater', None
)
@@ -174,7 +178,7 @@ def find_additional_output_req(liq_info, current_step):
lat_dist_water = liq_info['LateralSpreading']['Parameters'].get(
'DistWater', None
)
- if 'LateralSpreading' in liq_info.keys():
+ if 'LateralSpreading' in liq_info.keys(): # noqa: SIM118
lat_dist_water = liq_info['LateralSpreading']['Parameters'].get(
'DistWater', None
)
@@ -185,7 +189,7 @@ def find_additional_output_req(liq_info, current_step):
return additional_output_keys
-class liq_susc_enum(Enum):
+class liq_susc_enum(Enum): # noqa: D101
very_high = 5
high = 4
moderate = 3
@@ -195,7 +199,7 @@ class liq_susc_enum(Enum):
# Triggering:
-class Liquefaction:
+class Liquefaction: # noqa: D101
def __init__(self) -> None:
pass
@@ -249,7 +253,7 @@ class ZhuEtal2017(Liquefaction):
----------
.. [1] Zhu, J., Baise, L.G., and Thompson, E.M., 2017, An Updated Geospatial Liquefaction Model for Global Application, Bulletin of the Seismological Society of America, vol. 107, no. 3, pp. 1365-1385.
- """
+ """ # noqa: D400
def __init__(self, parameters, stations) -> None:
self.stations = stations
@@ -262,7 +266,7 @@ def __init__(self, parameters, stations) -> None:
self.vs30 = None # (m/s)
self.interpolate_spatial_parameters(parameters)
- def interpolate_spatial_parameters(self, parameters):
+ def interpolate_spatial_parameters(self, parameters): # noqa: D102
# site coordinate in CRS 4326
lat_station = [site['lat'] for site in self.stations]
lon_station = [site['lon'] for site in self.stations]
@@ -326,14 +330,14 @@ def interpolate_spatial_parameters(self, parameters):
lat_station,
)
self.vs30 = np.array([site['vs30'] for site in self.stations])
- print('Sampling finished')
+ print('Sampling finished') # noqa: T201
- def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
+ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys): # noqa: D102
if ('PGA' in im_list) and ('PGV' in im_list):
num_stations = len(self.stations)
num_scenarios = len(eq_data)
- PGV_col_id = [i for i, x in enumerate(im_list) if x == 'PGV'][0]
- PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
+ PGV_col_id = [i for i, x in enumerate(im_list) if x == 'PGV'][0] # noqa: N806, RUF015
+ PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] # noqa: N806, RUF015
for scenario_id in range(num_scenarios):
num_rlzs = ln_im_data[scenario_id].shape[2]
im_data_scen = np.zeros(
@@ -349,11 +353,11 @@ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys)
im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key]
ln_im_data[scenario_id] = im_data_scen
im_list = im_list + output_keys
- additional_output = dict()
+ additional_output = dict() # noqa: C408
for key in additional_output_keys:
item = getattr(self, key, None)
if item is None:
- warnings.warn(
+ warnings.warn( # noqa: B028
f"Additional output {key} is not available in the liquefaction trigging model 'ZhuEtal2017'."
)
else:
@@ -369,7 +373,7 @@ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys)
return ln_im_data, eq_data, im_list, additional_output
def model(self, pgv, pga, mag):
- """Model"""
+ """Model""" # noqa: D400
# zero prob_liq
zero_prob_liq = 1e-5 # decimal
@@ -394,7 +398,7 @@ def model(self, pgv, pga, mag):
ind_global = ~(self.dist_to_water <= model_transition)
# set cap of precip to 1700 mm
- self.precip[self.precip > 1700] = 1700
+ self.precip[self.precip > 1700] = 1700 # noqa: PLR2004
# x = b0 + b1*var1 + ...
# if len(ind_global) > 0:
@@ -435,22 +439,22 @@ def model(self, pgv, pga, mag):
) # set prob to > "0" to avoid 0% in log
# for pgv_mag < 3 cm/s, set prob to "0"
- prob_liq[pgv_mag < 3] = zero_prob_liq
+ prob_liq[pgv_mag < 3] = zero_prob_liq # noqa: PLR2004
# for pga_mag < 0.1 g, set prob to "0"
- prob_liq[pga_mag < 0.1] = zero_prob_liq
+ prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004
# for vs30 > 620 m/s, set prob to "0"
- prob_liq[self.vs30 > 620] = zero_prob_liq
+ prob_liq[self.vs30 > 620] = zero_prob_liq # noqa: PLR2004
# calculate sigma_mu
- sigma_mu = (np.exp(0.25) - 1) * prob_liq
+ sigma_mu = (np.exp(0.25) - 1) * prob_liq # noqa: F841
# determine liquefaction susceptibility category
- liq_susc[liq_susc_val > -1.15] = liq_susc_enum['very_high'].value
- liq_susc[liq_susc_val <= -1.15] = liq_susc_enum['high'].value
- liq_susc[liq_susc_val <= -1.95] = liq_susc_enum['moderate'].value
- liq_susc[liq_susc_val <= -3.15] = liq_susc_enum['low'].value
- liq_susc[liq_susc_val <= -3.20] = liq_susc_enum['very_low'].value
- liq_susc[liq_susc_val <= -38.1] = liq_susc_enum['none'].value
+ liq_susc[liq_susc_val > -1.15] = liq_susc_enum['very_high'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -1.15] = liq_susc_enum['high'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -1.95] = liq_susc_enum['moderate'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -3.15] = liq_susc_enum['low'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -3.20] = liq_susc_enum['very_low'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -38.1] = liq_susc_enum['none'].value # noqa: PLR2004
# liq_susc[prob_liq==zero_prob_liq] = 'none'
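The cascade of masked assignments above relies on each later line overwriting the earlier ones for progressively smaller liq_susc_val, so the net effect is a binning by thresholds. An equivalent vectorized formulation, assuming NaNs were already replaced by the -99 sentinel as in the surrounding code:

import numpy as np

def susceptibility_category(liq_susc_val):
    # 0=none ... 5=very_high, matching liq_susc_enum and the thresholds above.
    bins = np.array([-38.1, -3.20, -3.15, -1.95, -1.15])
    return np.searchsorted(bins, liq_susc_val, side='left')

assert susceptibility_category(-1.0) == 5   # > -1.15 -> very_high
assert susceptibility_category(-2.5) == 3   # (-3.15, -1.95] -> moderate
assert susceptibility_category(-99.0) == 0  # sentinel -> none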
@@ -488,7 +492,7 @@ class Hazus2020(Liquefaction):
.. [1] Federal Emergency Management Agency (FEMA), 2020, Hazus Earthquake Model - Technical Manual, Hazus 4.2 SP3, 436 pp. https://www.fema.gov/flood-maps/tools-resources/flood-map-products/hazus/user-technical-manuals.
.. [2] Liao, S.S., Veneziano, D., and Whitman, R.V., 1988, Regression Models for Evaluating Liquefaction Probability, Journal of Geotechnical Engineering, vol. 114, no. 4, pp. 389-411.
- """
+ """ # noqa: D205, D400
def __init__(self, parameters, stations) -> None:
self.stations = stations
@@ -496,7 +500,7 @@ def __init__(self, parameters, stations) -> None:
self.gw_depth = None # (m)
self.interpolate_spatial_parameters(parameters)
- def interpolate_spatial_parameters(self, parameters):
+ def interpolate_spatial_parameters(self, parameters): # noqa: D102
# site coordinate in CRS 4326
lat_station = [site['lat'] for site in self.stations]
lon_station = [site['lon'] for site in self.stations]
@@ -516,17 +520,17 @@ def interpolate_spatial_parameters(self, parameters):
np.array([site['liqSusc'] for site in self.stations]),
columns=['liqSusc'],
)
- SusceptibilityKey = 'liqSusc'
+ SusceptibilityKey = 'liqSusc' # noqa: N806
else:
- SusceptibilityFile = parameters['SusceptibilityFile']
+ SusceptibilityFile = parameters['SusceptibilityFile'] # noqa: N806
liq_susc_samples = sampleVector(
SusceptibilityFile, parameters['inputCRS'], lon_station, lat_station
)
- SusceptibilityKey = parameters['SusceptibilityKey']
+ SusceptibilityKey = parameters['SusceptibilityKey'] # noqa: N806
self.liq_susc = []
for susc in liq_susc_samples[SusceptibilityKey].unique():
if susc not in list(liq_susc_enum.__members__.keys()):
- warnings.warn(
+ warnings.warn( # noqa: B028
f'Unknown susceptibility "{susc}" defined; it is treated as "none".'
)
for row_index in liq_susc_samples.index:
@@ -545,13 +549,13 @@ def interpolate_spatial_parameters(self, parameters):
self.liq_susc = np.array(self.liq_susc)
# liq_susc = liq_susc_samples[parameters["SusceptibilityKey"]].fillna("NaN")
# self.liq_susc = liq_susc.to_numpy()
- print('Sampling finished')
+ print('Sampling finished') # noqa: T201
- def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys):
+ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys): # noqa: D102
if 'PGA' in im_list:
num_stations = len(self.stations)
num_scenarios = len(eq_data)
- PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
+ PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] # noqa: N806, RUF015
for scenario_id in range(num_scenarios):
num_rlzs = ln_im_data[scenario_id].shape[2]
im_data_scen = np.zeros(
@@ -566,11 +570,11 @@ def run(self, ln_im_data, eq_data, im_list, output_keys, additional_output_keys)
im_data_scen[:, len(im_list) + i, rlz_id] = model_output[key]
ln_im_data[scenario_id] = im_data_scen
im_list = im_list + output_keys
- additional_output = dict()
+ additional_output = dict() # noqa: C408
for key in additional_output_keys:
item = getattr(self, key, None)
if item is None:
- warnings.warn(
+ warnings.warn( # noqa: B028
f"Additional output {key} is not available in the liquefaction trigging model 'Hazus2020'."
)
else:
@@ -588,9 +592,9 @@ def model(
mag, # upstream PBEE RV
gw_depth, # geotechnical/geologic
liq_susc, # fixed/toggles
- return_inter_params=False, # to get intermediate params
+ return_inter_params=False, # to get intermediate params # noqa: FBT002, ARG004
):
- """Model"""
+ """Model""" # noqa: D400
# zero prob_liq
zero_prob_liq = 1e-5 # decimal
@@ -655,7 +659,7 @@ def model(
# for pga_mag < 0.1 g, set prob to "0"
# magnitude correction, from Baise & Rashidian (2020) and Allstadt et al. (2022)
pga_mag = pga / (10**2.24 / mag**2.56)
- prob_liq[pga_mag < 0.1] = zero_prob_liq
+ prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004
return {'liq_prob': prob_liq, 'liq_susc': liq_susc}
@@ -704,7 +708,7 @@ class Hazus2020_with_ZhuEtal2017(ZhuEtal2017):
"""
def model(self, pgv, pga, mag):
- """Model"""
+ """Model""" # noqa: D400
# zero prob_liq
zero_prob_liq = 1e-5 # decimal
@@ -724,7 +728,7 @@ def model(self, pgv, pga, mag):
ind_global = ~(self.dist_to_water <= model_transition)
# set cap of precip to 1700 mm
- self.precip[self.precip > 1700] = 1700
+ self.precip[self.precip > 1700] = 1700 # noqa: PLR2004
# x = b0 + b1*var1 + ...
# if len(ind_global) > 0:
@@ -751,12 +755,12 @@ def model(self, pgv, pga, mag):
liq_susc_val[np.isnan(liq_susc_val)] = -99.0
# determine liquefaction susceptibility category
- liq_susc[liq_susc_val > -1.15] = liq_susc_enum['very_high'].value
- liq_susc[liq_susc_val <= -1.15] = liq_susc_enum['high'].value
- liq_susc[liq_susc_val <= -1.95] = liq_susc_enum['moderate'].value
- liq_susc[liq_susc_val <= -3.15] = liq_susc_enum['low'].value
- liq_susc[liq_susc_val <= -3.20] = liq_susc_enum['very_low'].value
- liq_susc[liq_susc_val <= -38.1] = liq_susc_enum['none'].value
+ liq_susc[liq_susc_val > -1.15] = liq_susc_enum['very_high'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -1.15] = liq_susc_enum['high'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -1.95] = liq_susc_enum['moderate'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -3.15] = liq_susc_enum['low'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -3.20] = liq_susc_enum['very_low'].value # noqa: PLR2004
+ liq_susc[liq_susc_val <= -38.1] = liq_susc_enum['none'].value # noqa: PLR2004
# Below are HAZUS
# magnitude correction, from Baise & Rashidian (2020) and Allstadt et al. (2022)
pga_mag = pga / (10**2.24 / mag**2.56)
@@ -814,17 +818,17 @@ def model(self, pgv, pga, mag):
# Zhu et al. (2017) boundary constraints
# for pga_mag < 0.1 g, set prob to "0"
- prob_liq[pga_mag < 0.1] = zero_prob_liq
+ prob_liq[pga_mag < 0.1] = zero_prob_liq # noqa: PLR2004
# for vs30 > 620 m/s, set prob to "0"
- prob_liq[self.vs30 > 620] = zero_prob_liq
+ prob_liq[self.vs30 > 620] = zero_prob_liq # noqa: PLR2004
# for precip > 1700 mm, set prob to "0"
- prob_liq[self.precip > 1700] = zero_prob_liq
+ prob_liq[self.precip > 1700] = zero_prob_liq # noqa: PLR2004
return {'liq_prob': prob_liq, 'liq_susc': liq_susc}
# Lateral Spreading:
-class LateralSpread:
+class LateralSpread: # noqa: D101
def __init__(self) -> None:
pass
@@ -868,13 +872,13 @@ def __init__(self, stations, parameters):
super().__init__()
self.stations = stations
dist_to_water = parameters.get('DistWater')
- if type(dist_to_water) == np.array:
+ if type(dist_to_water) == np.array: # noqa: E721
self.dist_to_water = dist_to_water
elif dist_to_water == 'Defined ("distWater") in Site File (.csv)':
self.dist_to_water = np.array(
[site['distWater'] for site in self.stations]
)
- elif os.path.exists(os.path.dirname(dist_to_water)):
+ elif os.path.exists(os.path.dirname(dist_to_water)): # noqa: PTH110, PTH120
lat_station = [site['lat'] for site in self.stations]
lon_station = [site['lon'] for site in self.stations]
self.dist_to_water = sampleRaster(
@@ -883,7 +887,7 @@ def __init__(self, stations, parameters):
else:
self.dist_to_water = np.zeros(len(self.stations))
- def run(self, ln_im_data, eq_data, im_list):
+ def run(self, ln_im_data, eq_data, im_list): # noqa: D102
output_keys = ['liq_PGD_h']
if (
('PGA' in im_list)
@@ -892,11 +896,11 @@ def run(self, ln_im_data, eq_data, im_list):
):
num_stations = len(self.stations)
num_scenarios = len(eq_data)
- PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0]
- liq_prob_col_id = [i for i, x in enumerate(im_list) if x == 'liq_prob'][
+ PGA_col_id = [i for i, x in enumerate(im_list) if x == 'PGA'][0] # noqa: N806, RUF015
+ liq_prob_col_id = [i for i, x in enumerate(im_list) if x == 'liq_prob'][ # noqa: RUF015
0
]
- liq_susc_col_id = [i for i, x in enumerate(im_list) if x == 'liq_susc'][
+ liq_susc_col_id = [i for i, x in enumerate(im_list) if x == 'liq_susc'][ # noqa: RUF015
0
]
for scenario_id in range(num_scenarios):
@@ -931,9 +935,9 @@ def model(
prob_liq,
dist_water, # geotechnical/geologic
liq_susc, # fixed/toggles
- extrapolate_expected_pgdef=True,
+ extrapolate_expected_pgdef=True, # noqa: FBT002
):
- """Model"""
+ """Model""" # noqa: D400
# initialize arrays
# get threshold pga against liquefaction
@@ -950,19 +954,19 @@ def model(
# get normalized displacement in inches, a, for M=7
expected_pgdef = np.ones(pga.shape) * np.nan
expected_pgdef[ratio <= 1] = 1e-3 # above 1e-3 cm, or 1e-5 m
- expected_pgdef[np.logical_and(ratio > 1, ratio <= 2)] = (
- 12 * ratio[np.logical_and(ratio > 1, ratio <= 2)] - 12
+ expected_pgdef[np.logical_and(ratio > 1, ratio <= 2)] = ( # noqa: PLR2004
+ 12 * ratio[np.logical_and(ratio > 1, ratio <= 2)] - 12 # noqa: PLR2004
)
- expected_pgdef[np.logical_and(ratio > 2, ratio <= 3)] = (
- 18 * ratio[np.logical_and(ratio > 2, ratio <= 3)] - 24
+ expected_pgdef[np.logical_and(ratio > 2, ratio <= 3)] = ( # noqa: PLR2004
+ 18 * ratio[np.logical_and(ratio > 2, ratio <= 3)] - 24 # noqa: PLR2004
)
if extrapolate_expected_pgdef is True:
- expected_pgdef[ratio > 3] = 70 * ratio[ratio > 3] - 180
+ expected_pgdef[ratio > 3] = 70 * ratio[ratio > 3] - 180 # noqa: PLR2004
else:
- expected_pgdef[np.logical_and(ratio > 3, ratio <= 4)] = (
- 70 * ratio[np.logical_and(ratio > 3, ratio <= 4)] - 180
+ expected_pgdef[np.logical_and(ratio > 3, ratio <= 4)] = ( # noqa: PLR2004
+ 70 * ratio[np.logical_and(ratio > 3, ratio <= 4)] - 180 # noqa: PLR2004
)
- expected_pgdef[ratio > 4] = 100
+ expected_pgdef[ratio > 4] = 100 # noqa: PLR2004
expected_pgdef *= 2.54 # convert from inches to cm
# magnitude correction
@@ -971,7 +975,7 @@ def model(
# susceptibility to lateral spreading only for deposits found near water body (dw < dw_cutoff)
pgdef = k_delta * expected_pgdef * prob_liq
pgdef = pgdef / 100 # also convert from cm to m
- pgdef[dist_water > 25] = 1e-5
+ pgdef[dist_water > 25] = 1e-5 # noqa: PLR2004
# keep pgdef to minimum of 1e-5 m
pgdef = np.maximum(pgdef, 1e-5)
@@ -986,11 +990,11 @@ def model(
# output['ratio'] = ratio
# return
- return output
+ return output # noqa: RET504
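A quick standalone check of the piecewise normalized-displacement curve used above (inches, for the M = 7 reference case, before the 2.54 inch-to-cm conversion and the magnitude/probability scaling):

def expected_pgdef_inches(ratio):
    # Same segments as in the model above, with extrapolation beyond ratio = 3.
    if ratio <= 1:
        return 1e-3
    if ratio <= 2:
        return 12 * ratio - 12
    if ratio <= 3:
        return 18 * ratio - 24
    return 70 * ratio - 180

assert expected_pgdef_inches(2.0) == 12.0  # segments meet at ratio = 2
assert expected_pgdef_inches(3.0) == 30.0  # and at ratio = 3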
# Settlement:
-class GroundSettlement:
+class GroundSettlement: # noqa: D101
def __init__(self) -> None:
pass
@@ -1030,9 +1034,9 @@ class Hazus2020Vertical(GroundSettlement):
def model(
prob_liq, # geotechnical/geologic
liq_susc, # fixed/toggles
- return_inter_params=False, # to get intermediate params
+ return_inter_params=False, # to get intermediate params # noqa: FBT002
):
- """Model"""
+ """Model""" # noqa: D400
# initialize arrays
# get threshold pga against liquefaction, in cm
pgdef = np.ones(liq_susc.shape) * np.nan
@@ -1061,15 +1065,15 @@ def model(
# return
return output
- def run(self, ln_im_data, eq_data, im_list):
+ def run(self, ln_im_data, eq_data, im_list): # noqa: D102
output_keys = ['liq_PGD_v']
if ('liq_susc' in im_list) and ('liq_prob' in im_list):
num_stations = ln_im_data[0].shape[0]
num_scenarios = len(eq_data)
- liq_prob_col_id = [i for i, x in enumerate(im_list) if x == 'liq_prob'][
+ liq_prob_col_id = [i for i, x in enumerate(im_list) if x == 'liq_prob'][ # noqa: RUF015
0
]
- liq_susc_col_id = [i for i, x in enumerate(im_list) if x == 'liq_susc'][
+ liq_susc_col_id = [i for i, x in enumerate(im_list) if x == 'liq_susc'][ # noqa: RUF015
0
]
for scenario_id in range(num_scenarios):
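On the RUF015 findings above: the pattern [i for i, x in enumerate(im_list) if x == 'PGA'][0] builds a full list only to take its first element. Two equivalent, cheaper spellings, shown standalone:

im_list = ['PGA', 'PGV', 'liq_prob', 'liq_susc']  # illustrative list
pga_col_id = im_list.index('PGA')                                  # ValueError if missing
pga_col_id = next(i for i, x in enumerate(im_list) if x == 'PGA')  # StopIteration if missing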
diff --git a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py
index 2c6a7c9e8..029119094 100644
--- a/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py
+++ b/modules/performRegionalEventSimulation/regionalWindField/ComputeIntensityMeasure.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,11 +46,11 @@
import numpy as np
import pandas as pd
-from WindFieldSimulation import *
+from WindFieldSimulation import * # noqa: F403
-def run_model(scen, p, t, path_perturb, feat_perturb, res_mp):
- model = LinearAnalyticalModel_SnaikiWu_2017(cyclone_param=p, storm_track=t)
+def run_model(scen, p, t, path_perturb, feat_perturb, res_mp): # noqa: D103
+ model = LinearAnalyticalModel_SnaikiWu_2017(cyclone_param=p, storm_track=t) # noqa: F405
if scen['Terrain']:
model.add_reference_terrain(scen['Terrain'])
model.set_cyclone_mesh(scen['StormMesh'])
@@ -62,25 +62,25 @@ def run_model(scen, p, t, path_perturb, feat_perturb, res_mp):
# this is just an engineering judgement that the pressure difference, moving speed, and max-wind-speed radius
# should not be less than 0.0 in the value.
delta_feat[delta_feat < 0.0] = 0.0
- print('dLatitude, dLongtitude, dAngle = ', delta_path)
- print('dP, v, Rmax = ', delta_feat)
+ print('dLatitude, dLongitude, dAngle = ', delta_path) # noqa: T201
+ print('dP, v, Rmax = ', delta_feat) # noqa: T201
model.set_delta_path(delta_path)
model.set_delta_feat(delta_feat)
model.compute_wind_field()
res_mp.append(model.get_station_data())
-def simulate_storm(scenarios, event_info, model_type):
+def simulate_storm(scenarios, event_info, model_type): # noqa: D103
if model_type == 'LinearAnalytical':
num_per_site = event_info['NumberPerSite']
if num_per_site == 1:
path_perturb = np.zeros(3)
feat_perturb = np.zeros(3)
- elif len(event_info.get('Perturbation', [])) != 6:
- print('ComputeIntensityMeasure: Perturbation should have a size of 6.')
+ elif len(event_info.get('Perturbation', [])) != 6: # noqa: PLR2004
+ print('ComputeIntensityMeasure: Perturbation should have a size of 6.') # noqa: T201
path_perturb = np.array([0.5, 0.5, 90.0])
feat_perturb = np.array([10.0, 10.0, 10.0])
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: [1.0, 1.0, 90.0, 10.0, 10.0, 10.0] is used for perturbations.'
)
else:
@@ -88,7 +88,7 @@ def simulate_storm(scenarios, event_info, model_type):
feat_perturb = np.array(event_info['Perturbation'][3:6])
for i in range(len(scenarios)):
if i == 1:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: currently supporting single scenario simulation only.'
)
return -1
@@ -100,7 +100,7 @@ def simulate_storm(scenarios, event_info, model_type):
with mp.Manager() as manager:
res_mp = manager.list([])
proc_list = []
- for k in range(num_per_site):
+ for k in range(num_per_site): # noqa: B007
proc = mp.Process(
target=run_model,
args=(
@@ -120,10 +120,10 @@ def simulate_storm(scenarios, event_info, model_type):
proc = proc_list[k]
proc.join()
# extract data
- res = [x for x in res_mp]
+ res = [x for x in res_mp] # noqa: C416
else:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: currently only supporting LinearAnalytical model'
)
@@ -131,7 +131,7 @@ def simulate_storm(scenarios, event_info, model_type):
return res
-def simulate_storm_cpp(
+def simulate_storm_cpp( # noqa: C901, D103
site_info,
scenario_info,
scenario_data,
@@ -161,19 +161,19 @@ def simulate_storm_cpp(
scenario_info['Storm']['Radius'] = scenario_data[0]['CycloneParam'][5]
config = {'Scenario': scenario_info, 'Event': event_info}
- abs_path_config = os.path.abspath(os.path.join(input_dir, 'SimuConfig.json'))
- with open(abs_path_config, 'w') as f:
+ abs_path_config = os.path.abspath(os.path.join(input_dir, 'SimuConfig.json')) # noqa: PTH100, PTH118
+ with open(abs_path_config, 'w') as f: # noqa: PTH123
json.dump(config, f)
# site file
- abs_path_site = os.path.abspath(
- os.path.join(input_dir, site_info['input_file'])
+ abs_path_site = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, site_info['input_file']) # noqa: PTH118
)
# track file
- abs_path_track = os.path.abspath(
- os.path.join(input_dir, scenario_info['Storm']['Track'])
+ abs_path_track = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, scenario_info['Storm']['Track']) # noqa: PTH118
)
if scenario_info['Generator'] == 'SimulationHist':
- df = pd.DataFrame.from_dict(
+ df = pd.DataFrame.from_dict( # noqa: PD901
{
'Lat': scenario_data[0]['StormTrack']['Latitude'],
'Lon': scenario_data[0]['StormTrack']['Longitude'],
@@ -182,35 +182,35 @@ def simulate_storm_cpp(
df.to_csv(abs_path_track, sep=',', header=False, index=False)
# lat_w file
if scenario_info['Storm'].get('TrackSimu', None):
- abs_path_latw = os.path.abspath(
- os.path.join(input_dir, scenario_info['Storm']['TrackSimu'])
+ abs_path_latw = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, scenario_info['Storm']['TrackSimu']) # noqa: PTH118
)
else:
- abs_path_latw = os.path.abspath(
- os.path.join(input_dir, 'TrackSimu_populated.csv')
+ abs_path_latw = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, 'TrackSimu_populated.csv') # noqa: PTH118
)
- df = pd.DataFrame.from_dict(
+ df = pd.DataFrame.from_dict( # noqa: PD901
{
'Lat': scenario_data[0]['TrackSimu'],
}
)
df.to_csv(abs_path_latw, sep=',', header=False, index=False)
if scenario_info['Generator'] == 'SimulationHist':
- df = pd.DataFrame.from_dict(
+ df = pd.DataFrame.from_dict( # noqa: PD901
{
'Lat': scenario_data[0]['TrackSimu'],
}
)
df.to_csv(abs_path_latw, sep=',', header=False, index=False)
# terrain file
- if 'Terrain' in scenario_info.keys():
- abs_path_terrain = os.path.abspath(
- os.path.join(input_dir, scenario_info['Terrain'])
+ if 'Terrain' in scenario_info.keys(): # noqa: SIM118
+ abs_path_terrain = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, scenario_info['Terrain']) # noqa: PTH118
)
else:
# default terrain z0 = 0.01 everywhere for the defined domain
- abs_path_terrain = os.path.abspath(
- os.path.join(input_dir, 'DefaultTerrain.geojson')
+ abs_path_terrain = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, 'DefaultTerrain.geojson') # noqa: PTH118
)
dict_dt = {
'type': 'FeatureCollection',
@@ -233,7 +233,7 @@ def simulate_storm_cpp(
}
],
}
- with open(abs_path_terrain, 'w') as f:
+ with open(abs_path_terrain, 'w') as f: # noqa: PTH123
json.dump(dict_dt, f, indent=2)
# configuring perturbation
@@ -241,11 +241,11 @@ def simulate_storm_cpp(
if num_per_site == 1:
path_perturb = np.zeros(3)
feat_perturb = np.zeros(3)
- elif len(event_info.get('Perturbation', [])) != 6:
- print('ComputeIntensityMeasure: Perturbation should have a size of 6.')
+ elif len(event_info.get('Perturbation', [])) != 6: # noqa: PLR2004
+ print('ComputeIntensityMeasure: Perturbation should have a size of 6.') # noqa: T201
path_perturb = np.array([0.5, 0.5, 90.0])
feat_perturb = np.array([10.0, 10.0, 10.0])
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: [1.0, 1.0, 90.0, 10.0, 10.0, 10.0] is used for perturbations.'
)
else:
@@ -253,7 +253,7 @@ def simulate_storm_cpp(
feat_perturb = np.array(event_info['Perturbation'][3:6])
for i in range(int(scenario_info['Number'])):
if i == 1:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: currently supporting single scenario simulation only.'
)
return -1
@@ -264,9 +264,9 @@ def simulate_storm_cpp(
args_list = []
odir_list = []
if sys.platform.startswith('win'):
- windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation.exe'
+ windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation.exe' # noqa: PTH120
else:
- windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation'
+ windsimu_bin = os.path.dirname(__file__) + '/WindFieldSimulation' # noqa: PTH120
# preparing files
for j in range(num_per_site):
delta_path = (np.random.rand(3) - 0.5) * path_perturb
@@ -279,19 +279,19 @@ def simulate_storm_cpp(
'dV': delta_feat[1],
'dR': delta_feat[2],
}
- abs_path_pert = os.path.abspath(
- os.path.join(input_dir, 'Perturbation' + str(j) + '.json')
+ abs_path_pert = os.path.abspath( # noqa: PTH100
+ os.path.join(input_dir, 'Perturbation' + str(j) + '.json') # noqa: PTH118
)
- with open(abs_path_pert, 'w') as f:
+ with open(abs_path_pert, 'w') as f: # noqa: PTH123
json.dump(pert_dict, f)
- print('dLatitude, dLongtitude, dAngle = ', delta_path)
- print('dP, dv, dR = ', delta_feat)
- output_subdir = os.path.abspath(
- os.path.join(output_dir, 'simu' + str(j))
+ print('dLatitude, dLongitude, dAngle = ', delta_path) # noqa: T201
+ print('dP, dv, dR = ', delta_feat) # noqa: T201
+ output_subdir = os.path.abspath( # noqa: PTH100
+ os.path.join(output_dir, 'simu' + str(j)) # noqa: PTH118
)
- if os.path.exists(output_subdir):
+ if os.path.exists(output_subdir): # noqa: PTH110
shutil.rmtree(output_subdir)
- os.makedirs(output_subdir)
+ os.makedirs(output_subdir) # noqa: PTH103
args = [
windsimu_bin,
'--config',
@@ -316,47 +316,47 @@ def simulate_storm_cpp(
args_list.append(args)
odir_list.append(output_subdir)
# running
- print('ComputeIntensityMeaure: running analysis.')
+ print('ComputeIntensityMeasure: running analysis.') # noqa: T201
procs_list = [
- subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: S603
for cmd in args_list
]
for proc in procs_list:
proc.communicate()
# loading output
- print('ComputeIntensityMeaure: postprocessing simulation data.')
+ print('ComputeIntensityMeasure: postprocessing simulation data.') # noqa: T201
for j in range(num_per_site):
- os.remove(pert_list[j])
+ os.remove(pert_list[j]) # noqa: PTH107
station_res = {
'Latitude': [],
'Longitude': [],
'z0': [],
'PWS': {'height': [], 'duration': 600.0, 'windspeed': []},
}
- df = pd.read_csv(
- os.path.join(os.path.abspath(odir_list[j]), 'StationZ0.csv'),
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(os.path.abspath(odir_list[j]), 'StationZ0.csv'), # noqa: PTH100, PTH118
header=None,
index_col=None,
)
- station_res['z0'] = list(np.concatenate(df.values.tolist()).flat)
- df = pd.read_csv(
- os.path.join(os.path.abspath(odir_list[j]), 'MeasureHeight.csv'),
+ station_res['z0'] = list(np.concatenate(df.values.tolist()).flat) # noqa: PD011
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(os.path.abspath(odir_list[j]), 'MeasureHeight.csv'), # noqa: PTH100, PTH118
header=None,
index_col=None,
)
- station_res['PWS']['height'] = df.values.tolist()[0]
- df = pd.read_csv(
- os.path.join(os.path.abspath(odir_list[j]), 'MaxWindSpeed.csv'),
+ station_res['PWS']['height'] = df.values.tolist()[0] # noqa: PD011
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(os.path.abspath(odir_list[j]), 'MaxWindSpeed.csv'), # noqa: PTH100, PTH118
header=None,
index_col=None,
)
- station_res['PWS']['windspeed'] = df.values.tolist()
+ station_res['PWS']['windspeed'] = df.values.tolist() # noqa: PD011
res.append(station_res)
shutil.rmtree(odir_list[j])
# house-keeping
- os.remove(abs_path_config)
+ os.remove(abs_path_config) # noqa: PTH107
else:
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: currently only supporting LinearAnalytical model'
)
@@ -364,8 +364,8 @@ def simulate_storm_cpp(
return res
-def convert_wind_speed(event_info, simu_res):
- print(
+def convert_wind_speed(event_info, simu_res): # noqa: D103
+ print( # noqa: T201
'ComputeIntensityMeasure: converting peak wind speed to specified exposure, measuring height, and gust duration.'
)
@@ -379,7 +379,7 @@ def convert_wind_speed(event_info, simu_res):
else:
exposure = event_info['IntensityMeasure']['Exposure']
if exposure not in ['A', 'B', 'C', 'D']:
- print('ComputeIntensityMeasure: the Exposure should be A, B, C, or D.')
+ print('ComputeIntensityMeasure: the Exposure should be A, B, C, or D.') # noqa: T201
return -1
gust_duration = event_info['IntensityMeasure']['GustDuration']
reference_height = event_info['IntensityMeasure']['ReferenceHeight']
@@ -397,7 +397,7 @@ def convert_wind_speed(event_info, simu_res):
gust_duration_simu = cur_res['PWS']['duration']
# quick check the size
if pws_raw.shape[1] != len(measure_height):
- print(
+ print( # noqa: T201
'ComputeIntensityMeasure: please check the output wind speed results.'
)
return -1
@@ -419,23 +419,23 @@ def convert_wind_speed(event_info, simu_res):
zg_t = 274.32
# conversion
pws_raw = interp_wind_by_height(pws_raw, measure_height, reference_height)
- print(np.max(pws_raw))
+ print(np.max(pws_raw)) # noqa: T201
# computing gradient-height wind speed
pws_tmp = pws_raw * (zg / reference_height) ** (1.0 / alpha)
# converting exposure
pws_tmp = pws_tmp * (reference_height / zg_t) ** (1.0 / alpha_t)
pws = pws_tmp * gust_factor_ESDU(gust_duration_simu, gust_duration)
- print(np.max(pws))
+ print(np.max(pws)) # noqa: T201
# appending to pws_mr
pws_mr.append(pws)
- print('ComputeIntensityMeasure: wind speed conversion completed.')
+ print('ComputeIntensityMeasure: wind speed conversion completed.') # noqa: T201
# return
return pws_mr
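The conversion chain above runs in three steps: a power-law climb from the reference height to the gradient height of the simulated exposure, a descent back to the reference height under the target exposure, and a gust-duration rescale. A minimal sketch of that chain, assuming the same power-law profile; the function name, the sample speeds, and the ASCE 7-style Exposure C constants (alpha = 9.5, zg = 274.32 m) are illustrative, not values taken from the module:

import numpy as np

def convert_exposure_sketch(pws, h_ref, alpha, zg, alpha_t, zg_t, gust_factor):
    # climb to the gradient height of the simulated exposure (power law)
    pws_gradient = pws * (zg / h_ref) ** (1.0 / alpha)
    # descend to the reference height under the target exposure
    pws_target = pws_gradient * (h_ref / zg_t) ** (1.0 / alpha_t)
    # rescale from the simulated gust duration to the requested one
    return pws_target * gust_factor

# sanity check: identical exposures and a unit gust factor return the input
print(convert_exposure_sketch(np.array([40.0, 55.0]), 10.0, 9.5, 274.32, 9.5, 274.32, 1.0))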
def interp_wind_by_height(pws_ip, height_simu, height_ref):
- """interp_wind_by_height: interpolating the wind simulation results by the reference height"""
+ """interp_wind_by_height: interpolating the wind simulation results by the reference height""" # noqa: D400
num_stat = pws_ip.shape[0]
pws_op = np.zeros(num_stat)
for i in range(num_stat):
@@ -451,8 +451,8 @@ def interp_wind_by_height(pws_ip, height_simu, height_ref):
return pws_op
-def gust_factor_ESDU(gd_c, gd_t):
- """gust_factor_ESDU: return a gust facto between gd_c and gd_t"""
+def gust_factor_ESDU(gd_c, gd_t): # noqa: N802
+ """gust_factor_ESDU: return a gust facto between gd_c and gd_t""" # noqa: D400
# gust duration (sec)
gd = [
1.0,
@@ -475,11 +475,11 @@ def gust_factor_ESDU(gd_c, gd_t):
gd_c, gd, gf, left=gf[0], right=gf[-1]
)
# return
- return gf_t
+ return gf_t # noqa: RET504
-def export_pws(stations, pws, output_dir, filename='EventGrid.csv'):
- print('ComputeIntensityMeasure: saving results.')
+def export_pws(stations, pws, output_dir, filename='EventGrid.csv'): # noqa: D103
+ print('ComputeIntensityMeasure: saving results.') # noqa: T201
# collecting site locations
lat = []
@@ -492,15 +492,15 @@ def export_pws(stations, pws, output_dir, filename='EventGrid.csv'):
station_num = len(lat)
csv_file = [str(x + 1) + '.csv' for x in range(station_num)]
d = {'GP_file': csv_file, 'Latitude': lat, 'Longitude': lon}
- df = pd.DataFrame.from_dict(d)
- df.to_csv(os.path.join(output_dir, filename), index=False)
+ df = pd.DataFrame.from_dict(d) # noqa: PD901
+ df.to_csv(os.path.join(output_dir, filename), index=False) # noqa: PTH118
for i in range(station_num):
pws_op = [pws[0][i]]
if len(pws) > 1:
for j in range(len(pws) - 1):
- pws_op.append(pws[j + 1][i])
+ pws_op.append(pws[j + 1][i]) # noqa: PERF401
d = {'PWS': pws_op}
- df = pd.DataFrame.from_dict(d)
- df.to_csv(os.path.join(output_dir, csv_file[i]), index=False)
+ df = pd.DataFrame.from_dict(d) # noqa: PD901
+ df.to_csv(os.path.join(output_dir, csv_file[i]), index=False) # noqa: PTH118
- print('ComputeIntensityMeasure: simulated wind speed field saved.')
+ print('ComputeIntensityMeasure: simulated wind speed field saved.') # noqa: T201
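gust_factor_ESDU clamps its interpolation at the table endpoints through the left/right arguments of np.interp. A sketch of that pattern under stated assumptions: the table values below are placeholders rather than the ESDU numbers in the file, and the ratio form is one plausible reading of how the factor converts between two durations.

import numpy as np

GD = np.array([1.0, 10.0, 60.0, 600.0, 3600.0])  # gust duration (sec), placeholder grid
GF = np.array([1.59, 1.48, 1.28, 1.07, 1.00])    # gust factor, placeholder values

def gust_factor_sketch(gd_c, gd_t):
    # clamp both lookups at the endpoints, as the left/right arguments do above
    gf_c = np.interp(gd_c, GD, GF, left=GF[0], right=GF[-1])
    gf_t = np.interp(gd_t, GD, GF, left=GF[0], right=GF[-1])
    return gf_t / gf_c  # converts a gd_c-second gust into a gd_t-second gust

print(gust_factor_sketch(600.0, 3.0))  # e.g. 10-min mean to 3-s gust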
diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py
index 5fcf99900..c420b6e15 100644
--- a/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py
+++ b/modules/performRegionalEventSimulation/regionalWindField/CreateScenario.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -44,7 +44,7 @@
import pandas as pd
-def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
+def create_wind_scenarios(scenario_info, event_info, stations, data_dir): # noqa: C901, D103
# Number of scenarios
source_num = scenario_info.get('Number', 1)
# Directly defining earthquake ruptures
@@ -60,34 +60,34 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
# Track data
try:
track_file = scenario_info['Storm'].get('Track')
- df = pd.read_csv(
- os.path.join(data_dir, track_file),
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(data_dir, track_file), # noqa: PTH118
header=None,
index_col=None,
)
track = {
- 'Latitude': df.iloc[:, 0].values.tolist(),
- 'Longitude': df.iloc[:, 1].values.tolist(),
+ 'Latitude': df.iloc[:, 0].values.tolist(), # noqa: PD011
+ 'Longitude': df.iloc[:, 1].values.tolist(), # noqa: PD011
}
- except:
- print(
+ except: # noqa: E722
+ print( # noqa: T201
'CreateScenario: error - no storm track provided or file format not accepted.'
)
# Save Lat_w.csv
track_simu_file = scenario_info['Storm'].get('TrackSimu', None)
if track_simu_file:
- df = pd.read_csv(
- os.path.join(data_dir, track_simu_file),
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(data_dir, track_simu_file), # noqa: PTH118
header=None,
index_col=None,
)
- track_simu = df.iloc[:, 0].values.tolist()
+ track_simu = df.iloc[:, 0].values.tolist() # noqa: PD011
else:
track_simu = track['Latitude']
# Reading Terrain info (if provided)
terrain_file = scenario_info.get('Terrain', None)
if terrain_file:
- with open(os.path.join(data_dir, terrain_file)) as f:
+ with open(os.path.join(data_dir, terrain_file)) as f: # noqa: PTH118, PTH123
terrain_data = json.load(f)
else:
terrain_data = []
@@ -100,8 +100,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
param.append(scenario_info['Storm']['Landfall']['Pressure'])
param.append(scenario_info['Storm']['Landfall']['Speed'])
param.append(scenario_info['Storm']['Landfall']['Radius'])
- except:
- print('CreateScenario: please provide all needed landfall properties.')
+ except: # noqa: E722
+ print('CreateScenario: please provide all needed landfall properties.') # noqa: T201
# Monte-Carlo
# del_par = [0, 0, 0] # default
# Parsing mesh configurations
@@ -110,7 +110,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
# Wind speed measuring height
measure_height = event_info['IntensityMeasure']['MeasureHeight']
# Saving results
- scenario_data = dict()
+ scenario_data = dict() # noqa: C408
for i in range(source_num):
scenario_data.update(
{
@@ -130,7 +130,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
return scenario_data
# Using the properties of a historical storm to do simulation
- elif scenario_info['Generator'] == 'SimulationHist':
+ elif scenario_info['Generator'] == 'SimulationHist': # noqa: RET505
# Collecting site locations
lat = []
lon = []
@@ -141,8 +141,8 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
station_list = {'Latitude': lat, 'Longitude': lon}
# Loading historical storm database
df_hs = pd.read_csv(
- os.path.join(
- os.path.dirname(__file__),
+ os.path.join( # noqa: PTH118
+ os.path.dirname(__file__), # noqa: PTH120
'database/historical_storm/ibtracs.last3years.list.v04r00.csv',
),
header=[0, 1],
@@ -152,46 +152,46 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
try:
storm_name = scenario_info['Storm'].get('Name')
storm_year = scenario_info['Storm'].get('Year')
- except:
- print('CreateScenario: error - no storm name or year is provided.')
+ except: # noqa: E722
+ print('CreateScenario: error - no storm name or year is provided.') # noqa: T201
# Searching the storm
try:
df_chs = df_hs[df_hs[('NAME', ' ')] == storm_name]
df_chs = df_chs[df_chs[('SEASON', 'Year')] == storm_year]
- except:
- print('CreateScenario: error - the storm is not found.')
+ except: # noqa: E722
+ print('CreateScenario: error - the storm is not found.') # noqa: T201
if len(df_chs.values) == 0:
- print('CreateScenario: error - the storm is not found.')
+ print('CreateScenario: error - the storm is not found.') # noqa: T201
return 1
# Collecting storm properties
track_lat = []
track_lon = []
- for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist():
+ for x in df_chs[('USA_LAT', 'degrees_north')].values.tolist(): # noqa: PD011
if x != ' ':
- track_lat.append(float(x))
- for x in df_chs[('USA_LON', 'degrees_east')].values.tolist():
+ track_lat.append(float(x)) # noqa: PERF401
+ for x in df_chs[('USA_LON', 'degrees_east')].values.tolist(): # noqa: PD011
if x != ' ':
- track_lon.append(float(x))
+ track_lon.append(float(x)) # noqa: PERF401
# If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON
if len(track_lat) == 0:
- print(
+ print( # noqa: T201
'CreateScenario: warning - the USA_LAT and USA_LON are not available, switching to LAT and LON.'
)
- for x in df_chs[('LAT', 'degrees_north')].values.tolist():
+ for x in df_chs[('LAT', 'degrees_north')].values.tolist(): # noqa: PD011
if x != ' ':
- track_lat.append(float(x))
- for x in df_chs[('LON', 'degrees_east')].values.tolist():
+ track_lat.append(float(x)) # noqa: PERF401
+ for x in df_chs[('LON', 'degrees_east')].values.tolist(): # noqa: PD011
if x != ' ':
- track_lon.append(float(x))
+ track_lon.append(float(x)) # noqa: PERF401
if len(track_lat) == 0:
- print('CreateScenario: error - no track data is found.')
+ print('CreateScenario: error - no track data is found.') # noqa: T201
return 1
# Saving the track
track = {'Latitude': track_lat, 'Longitude': track_lon}
# Reading Terrain info (if provided)
terrain_file = scenario_info.get('Terrain', None)
if terrain_file:
- with open(os.path.join(data_dir, terrain_file)) as f:
+ with open(os.path.join(data_dir, terrain_file)) as f: # noqa: PTH118, PTH123
terrain_data = json.load(f)
else:
terrain_data = []
@@ -199,12 +199,12 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
dist2land = []
for x in df_chs[('DIST2LAND', 'km')]:
if x != ' ':
- dist2land.append(x)
+ dist2land.append(x) # noqa: PERF401
if len(track_lat) == 0:
- print('CreateScenario: error - no landing information is found.')
+ print('CreateScenario: error - no landing information is found.') # noqa: T201
return 1
if 0 not in dist2land:
- print(
+ print( # noqa: T201
'CreateScenario: warning - no landfall is found, using the closest location.'
)
tmploc = dist2land.index(min(dist2land))
@@ -216,19 +216,19 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
track_simu_file = scenario_info['Storm'].get('TrackSimu', None)
if track_simu_file:
try:
- df = pd.read_csv(
- os.path.join(data_dir, track_simu_file),
+ df = pd.read_csv( # noqa: PD901
+ os.path.join(data_dir, track_simu_file), # noqa: PTH118
header=None,
index_col=None,
)
- track_simu = df.iloc[:, 0].values.tolist()
- except:
- print(
+ track_simu = df.iloc[:, 0].values.tolist() # noqa: PD011
+ except: # noqa: E722
+ print( # noqa: T201
'CreateScenario: warning - TrackSimu file not found, using the full track.'
)
track_simu = track_lat
else:
- print(
+ print( # noqa: T201
'CreateScenario: warning - no truncation defined, using the full track.'
)
# tmp = track_lat
@@ -239,22 +239,22 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
try:
landfall_lat = float(df_chs[('USA_LAT', 'degrees_north')].iloc[tmploc])
landfall_lon = float(df_chs[('USA_LON', 'degrees_east')].iloc[tmploc])
- except:
+ except: # noqa: E722
# If the default option (USA_LAT and USA_LON) is not available, switching to LAT and LON
landfall_lat = float(df_chs[('LAT', 'degrees_north')].iloc[tmploc])
landfall_lon = float(df_chs[('LON', 'degrees_east')].iloc[tmploc])
try:
landfall_ang = float(df_chs[('STORM_DIR', 'degrees')].iloc[tmploc])
- except:
- print('CreateScenario: error - no landing angle is found.')
- if landfall_ang > 180.0:
+ except: # noqa: E722
+ print('CreateScenario: error - no landing angle is found.') # noqa: T201
+ if landfall_ang > 180.0: # noqa: PLR2004
landfall_ang = landfall_ang - 360.0
landfall_prs = (
1013.0
- np.min(
[
float(x)
- for x in df_chs[('USA_PRES', 'mb')]
+ for x in df_chs[('USA_PRES', 'mb')] # noqa: PD011
.iloc[tmploc - 5 :]
.values.tolist()
if x != ' '
@@ -268,17 +268,17 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
landfall_rad = (
float(df_chs[('USA_RMW', 'nmile')].iloc[tmploc]) * 1.60934
) # convert nmile to km
- except:
+ except: # noqa: E722
# No available radius of maximum wind is found
- print('CreateScenario: warning - switching to REUNION_RMW.')
+ print('CreateScenario: warning - switching to REUNION_RMW.') # noqa: T201
try:
# If the default option (USA_RMW) is not available, switching to REUNION_RMW
landfall_rad = (
float(df_chs[('REUNION_RMW', 'nmile')].iloc[tmploc]) * 1.60934
) # convert nmile to km
- except:
+ except: # noqa: E722
# No available radius of maximum wind is found
- print(
+ print( # noqa: T201
'CreateScenario: warning - no available radius of maximum wind is found, using a default 50 km.'
)
landfall_rad = 50
@@ -297,7 +297,7 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
# Wind speed measuring height
measure_height = event_info['IntensityMeasure']['MeasureHeight']
# Saving results
- scenario_data = dict()
+ scenario_data = dict() # noqa: C408
for i in range(source_num):
scenario_data.update(
{
@@ -317,4 +317,4 @@ def create_wind_scenarios(scenario_info, event_info, stations, data_dir):
return scenario_data
else:
- print('CreateScenario: currently only supporting Simulation generator.')
+ print('CreateScenario: currently only supporting Simulation generator.') # noqa: T201, RET503
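The SimulationHist branch indexes df_hs with two-level column keys because the IBTrACS export carries a two-row header (variable name plus unit). A minimal, hypothetical reproduction of that lookup; ibtracs_subset.csv, 'IAN', and 2022 are stand-ins, not values from the module:

import pandas as pd

# header=[0, 1] turns each column label into a tuple such as ('NAME', ' ')
df_hs = pd.read_csv('ibtracs_subset.csv', header=[0, 1], index_col=None)

df_chs = df_hs[df_hs[('NAME', ' ')] == 'IAN']
df_chs = df_chs[df_chs[('SEASON', 'Year')] == 2022]
if len(df_chs.values) == 0:
    print('CreateScenario-style check: the storm is not found.')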
diff --git a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py
index 32aa6633c..0e14e611b 100644
--- a/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py
+++ b/modules/performRegionalEventSimulation/regionalWindField/CreateStation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -43,13 +43,13 @@
import pandas as pd
-def get_label(options, labels, label_name):
+def get_label(options, labels, label_name): # noqa: D103
for option in options:
if option in labels:
labels = labels[labels != option]
return option, labels
- print(f'WARNING: Could not identify the label for the {label_name}')
+ print(f'WARNING: Could not identify the label for the {label_name}') # noqa: T201, RET503
def create_stations(input_file, output_file, min_id, max_id):
@@ -61,14 +61,14 @@ def create_stations(input_file, output_file, min_id, max_id):
max_id: the max ID to end
Output:
run_tag: 0 - success, 1 - input failure, 2 - output failure
- """
+ """ # noqa: D205, D400, D401
# Reading csv data
run_tag = 1
try:
stn_df = pd.read_csv(input_file, header=0, index_col=0)
- except:
+ except: # noqa: E722
run_tag = 0
- return run_tag
+ return run_tag # noqa: RET504
# Max and Min IDs
stn_ids_min = np.min(stn_df.index.values)
stn_ids_max = np.max(stn_df.index.values)
@@ -80,7 +80,7 @@ def create_stations(input_file, output_file, min_id, max_id):
max_id = np.min([stn_ids_max, max_id])
selected_stn = stn_df.loc[min_id:max_id, :]
# Extracting data
- labels = selected_stn.columns.values
+ labels = selected_stn.columns.values # noqa: PD011
lon_label, labels = get_label(
['Longitude', 'longitude', 'lon', 'Lon'], labels, 'longitude'
)
@@ -94,7 +94,7 @@ def create_stations(input_file, output_file, min_id, max_id):
stn_file['Stations'].append(tmp)
# Saving data to the output file
if output_file:
- with open(output_file, 'w') as f:
+ with open(output_file, 'w') as f: # noqa: PTH123
json.dump(stn_file, f, indent=2)
# Returning the final run state
return stn_file
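get_label returns the matched column name and the remaining labels, and the RET503 suppression above marks its implicit None on a miss. A sketch that makes the fall-through explicit; get_label_sketch and the sample labels are illustrative only:

import numpy as np

def get_label_sketch(options, labels, label_name):
    for option in options:
        if option in labels:
            # hand back the match and the labels that are still unclaimed
            return option, labels[labels != option]
    print(f'WARNING: Could not identify the label for the {label_name}')
    return None, labels  # explicit, where the original relies on fall-through

labels = np.array(['Longitude', 'Latitude', 'z0'])
lon_label, labels = get_label_sketch(
    ['Longitude', 'longitude', 'lon', 'Lon'], labels, 'longitude'
)
print(lon_label, labels)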
diff --git a/modules/performRegionalEventSimulation/regionalWindField/HurricaneSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/HurricaneSimulation.py
index ca405d29a..30bca09e6 100644
--- a/modules/performRegionalEventSimulation/regionalWindField/HurricaneSimulation.py
+++ b/modules/performRegionalEventSimulation/regionalWindField/HurricaneSimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2021 Leland Stanford Junior University
# Copyright (c) 2021 The Regents of the University of California
#
@@ -43,13 +43,13 @@
import os
import sys
-from ComputeIntensityMeasure import *
-from CreateScenario import *
-from CreateStation import *
+from ComputeIntensityMeasure import * # noqa: F403
+from CreateScenario import * # noqa: F403
+from CreateStation import * # noqa: F403
if __name__ == '__main__':
logger = logging.getLogger()
- handlerStream = logging.StreamHandler(sys.stdout)
+ handlerStream = logging.StreamHandler(sys.stdout) # noqa: N816
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
@@ -59,7 +59,7 @@
parser = argparse.ArgumentParser()
parser.add_argument('--hazard_config')
args = parser.parse_args()
- with open(args.hazard_config) as f:
+ with open(args.hazard_config) as f: # noqa: PTH123
hazard_info = json.load(f)
# Directory
@@ -68,55 +68,55 @@
input_dir = dir_info['Input']
output_dir = dir_info['Output']
try:
- os.mkdir(f'{output_dir}')
- except:
- print('HurricaneSimulation: output folder already exists.')
+ os.mkdir(f'{output_dir}') # noqa: PTH102
+ except: # noqa: E722
+ print('HurricaneSimulation: output folder already exists.') # noqa: T201
# Sites and stations
- print('HurricaneSimulation: creating stations.')
+ print('HurricaneSimulation: creating stations.') # noqa: T201
site_info = hazard_info['Site']
if site_info['Type'] == 'From_CSV':
- input_file = os.path.join(input_dir, site_info['input_file'])
+ input_file = os.path.join(input_dir, site_info['input_file']) # noqa: PTH118
output_file = site_info.get('output_file', False)
if output_file:
- output_file = os.path.join(output_dir, output_file)
- min_ID = site_info['min_ID']
- max_ID = site_info['max_ID']
+ output_file = os.path.join(output_dir, output_file) # noqa: PTH118
+ min_ID = site_info['min_ID'] # noqa: N816
+ max_ID = site_info['max_ID'] # noqa: N816
# Creating stations from the csv input file
- stations = create_stations(input_file, output_file, min_ID, max_ID)
+ stations = create_stations(input_file, output_file, min_ID, max_ID) # noqa: F405
if stations:
- print('HurricaneSimulation: stations created.')
+ print('HurricaneSimulation: stations created.') # noqa: T201
else:
- print(
+ print( # noqa: T201
'HurricaneSimulation: please check the "Input" directory in the configuration json file.'
)
- exit()
+ exit() # noqa: PLR1722
# Scenarios
- print('HurricaneSimulation: creating scenarios.')
+ print('HurricaneSimulation: creating scenarios.') # noqa: T201
scenario_info = hazard_info['Scenario']
if scenario_info['Type'] == 'Wind':
# Creating wind scenarios
event_info = hazard_info['Event']
- scenarios = create_wind_scenarios(
+ scenarios = create_wind_scenarios( # noqa: F405
scenario_info, event_info, stations, input_dir
)
else:
- print('HurricaneSimulation: currently only supports wind simulations.')
- print('HurricaneSimulation: scenarios created.')
+ print('HurricaneSimulation: currently only supports wind simulations.') # noqa: T201
+ print('HurricaneSimulation: scenarios created.') # noqa: T201
# Computing intensity measures
- print('HurricaneSimulation: computing intensity measures.')
+ print('HurricaneSimulation: computing intensity measures.') # noqa: T201
if scenario_info['Type'] == 'Wind':
if 'Simulation' in scenario_info['Generator']:
if scenario_info['ModelType'] == 'LinearAnalyticalPy':
# simulating storm
- storm_simu = simulate_storm(
+ storm_simu = simulate_storm( # noqa: F405
scenarios, event_info, 'LinearAnalytical'
)
elif scenario_info['ModelType'] == 'LinearAnalytical':
# simulating storm (c++ binary)
- storm_simu = simulate_storm_cpp(
+ storm_simu = simulate_storm_cpp( # noqa: F405
site_info,
scenario_info,
scenarios,
@@ -125,17 +125,17 @@
dir_info,
)
else:
- print(
+ print( # noqa: T201
'HurricaneSimulation: currently only supporting the LinearAnalytical model type.'
)
# converting peak wind speed
- pws = convert_wind_speed(event_info, storm_simu)
+ pws = convert_wind_speed(event_info, storm_simu) # noqa: F405
# saving results
- export_pws(stations, pws, output_dir, filename='EventGrid.csv')
+ export_pws(stations, pws, output_dir, filename='EventGrid.csv') # noqa: F405
else:
- print('HurricaneSimulation: currently only supporting wind simulations.')
+ print('HurricaneSimulation: currently only supporting wind simulations.') # noqa: T201
else:
- print(
+ print( # noqa: T201
'HurricaneSimulation: currently only supports earthquake and wind simulations.'
)
- print('HurricaneSimulation: intensity measures computed.')
+ print('HurricaneSimulation: intensity measures computed.') # noqa: T201
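The F403/F405 suppressions above keep the star imports in place. Assuming the driver only calls the names visible in these hunks, an explicit import list would silence both codes without any noqa:

from ComputeIntensityMeasure import (
    convert_wind_speed,
    export_pws,
    simulate_storm,
    simulate_storm_cpp,
)
from CreateScenario import create_wind_scenarios
from CreateStation import create_stations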
diff --git a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py
index 83aadaeba..c3d5ef3ce 100644
--- a/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py
+++ b/modules/performRegionalEventSimulation/regionalWindField/WindFieldSimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -55,8 +55,8 @@
from shapely.geometry import Point, Polygon
-class LinearAnalyticalModel_SnaikiWu_2017:
- def __init__(self, cyclone_param=[], storm_track=[]):
+class LinearAnalyticalModel_SnaikiWu_2017: # noqa: D101
+ def __init__(self, cyclone_param=[], storm_track=[]): # noqa: B006
"""__init__: initializing the tropical cyclone
cyclone_param: 6-dimensional array
- cyclone_param[0]: landfall Latitude
@@ -68,7 +68,7 @@ def __init__(self, cyclone_param=[], storm_track=[]):
storm_track:
- storm_track['Latitude']: latitude values of the storm track
- storm_track['Longitude']: longitude values of the storm track
- """
+ """ # noqa: D205, D400
# constants
self.R = 6371.0 * 1e3
self.EDDY_VISCOCITY = 75.0
@@ -90,15 +90,15 @@ def __init__(self, cyclone_param=[], storm_track=[]):
+ 0.00184 * self.cyclone_pres / 100.0
- 0.00309 * self.cyclone_radi
)
- except:
- print('WindFieldSimulaiton: please check the cyclone_param input.')
+ except: # noqa: E722
+ print('WindFieldSimulation: please check the cyclone_param input.') # noqa: T201
# saving storm track data
try:
self.track_lat = storm_track['Latitude']
self.track_lon = storm_track['Longitude']
if len(self.track_lat) != len(self.track_lon):
- print(
+ print( # noqa: T201
'WindFieldSimulation: warning - storm track Latitude and Longitude sizes are different, data truncated.'
)
self.track_lat = self.track_lat[
@@ -107,8 +107,8 @@ def __init__(self, cyclone_param=[], storm_track=[]):
self.track_lon = self.track_lon[
0 : int(min(len(self.track_lat), len(self.track_lon)))
]
- except:
- print('WindFieldSimulaiton: please check the strom_track input.')
+ except: # noqa: E722
+ print('WindFieldSimulation: please check the storm_track input.') # noqa: T201
# initiation
self.station_num = 0
@@ -128,17 +128,17 @@ def __init__(self, cyclone_param=[], storm_track=[]):
self.mesh_info = []
def set_delta_path(self, delta_path):
- """set_delta_path: perturbing the path coordinates and heading angle of the storm track"""
- if len(delta_path) == 3:
+ """set_delta_path: perturbing the path coordinates and heading angle of the storm track""" # noqa: D400
+ if len(delta_path) == 3: # noqa: PLR2004
self.delta_path = delta_path
else:
- print(
+ print( # noqa: T201
'WindFieldSimulation: the delta_path should have a size of 3, default delta_path used.'
)
def set_delta_feat(self, delta_feat):
- """set_delta_feat: perturbing the central pressure difference, traslational speed, and max-wind-speed radius"""
- if len(delta_feat) == 3:
+ """set_delta_feat: perturbing the central pressure difference, traslational speed, and max-wind-speed radius""" # noqa: D400
+ if len(delta_feat) == 3: # noqa: PLR2004
self.cyclone_pres = delta_feat[0] * 100.0
self.cyclone_sped = delta_feat[1] * 1000.0 / 3600.0
self.cyclone_radi = delta_feat[2]
@@ -149,12 +149,12 @@ def set_delta_feat(self, delta_feat):
- 0.00309 * self.cyclone_radi
)
else:
- print(
+ print( # noqa: T201
'WindFieldSimulation: the delta_feat should have a size of 3, default delta_feat used.'
)
def __interp_z0(self, lat, lon):
- """__interp_z0: finding the z0 at (lat, lon) by interpolating reference terrain polygons"""
+ """__interp_z0: finding the z0 at (lat, lon) by interpolating reference terrain polygons""" # noqa: D400
z0 = []
if not self.terrain_z0:
# no reference terrain provided, using default reference z0 = 0.03
@@ -173,7 +173,7 @@ def __interp_z0(self, lat, lon):
def add_reference_terrain(self, terrain_info):
"""add_reference_terrainL specifying reference z0 values for a set of polygons
terrain_info: geojson formatted polygon and z0 data
- """
+ """ # noqa: D205, D400
for p in terrain_info['features']:
if p['geometry']['type'] == 'Polygon':
# creating a new polygon
@@ -190,7 +190,7 @@ def set_cyclone_mesh(self, mesh_info):
mesh_info[3]: starting angle (usually 0)
mesh_info[4]: interval angle
mesh_info[5]: ending angle (usually 360)
- """
+ """ # noqa: D205, D400
try:
self.mesh_info = mesh_info
self.r = np.arange(
@@ -199,28 +199,28 @@ def set_cyclone_mesh(self, mesh_info):
self.theta = np.arange(
mesh_info[3], mesh_info[5] + mesh_info[4], mesh_info[4]
)
- print('WindFieldSimulation: cyclone meshed.')
- except:
- print('WindFieldSimulation: input format error in set_cyclone_mesh.')
+ print('WindFieldSimulation: cyclone meshed.') # noqa: T201
+ except: # noqa: E722
+ print('WindFieldSimulation: input format error in set_cyclone_mesh.') # noqa: T201
def set_track_mesh(self, mesh_lat):
"""set_track_meesh: meshing the storm track
mesh_lat[0]: starting latitude value of the meshed track
mesh_lat[1]: interval latitude value
mesh_lat[2]: ending latitude value of the meshed track
- """
+ """ # noqa: D205, D400
try:
lat0 = mesh_lat[0]
dlat = mesh_lat[1]
lat1 = mesh_lat[2]
- except:
- print('WindFieldSimulation: input format error in set_track_mesh.')
+ except: # noqa: E722
+ print('WindFieldSimulation: input format error in set_track_mesh.') # noqa: T201
# boundary checks
if (max(lat0, lat1) > max(self.track_lat)) or (
min(lat0, lat1) < min(self.track_lat)
):
- print(
+ print( # noqa: T201
'WindFieldSimulation: warning - forcing the track mesh to be consistent with the original track boundary.'
)
lat0 = min(lat0, max(self.track_lat))
@@ -233,30 +233,30 @@ def set_track_mesh(self, mesh_lat):
self.track_lon_m = np.abs(
np.interp(self.track_lat_m, self.track_lat, self.track_lon)
)
- print('WindFieldSimulation: track meshed.')
+ print('WindFieldSimulation: track meshed.') # noqa: T201
def define_track(self, track_lat):
"""set_track_meesh: meshing the storm track
mesh_lat[0]: starting latitude value of the meshed track
mesh_lat[1]: interval latitude value
mesh_lat[2]: ending latitude value of the meshed track
- """
+ """ # noqa: D205, D400
# computing meshed track's Latitude and Longitude values
self.track_lat_m = track_lat
self.track_lon_m = np.abs(
np.interp(self.track_lat_m, self.track_lat, self.track_lon)
)
- print('WindFieldSimulation: track defined.')
+ print('WindFieldSimulation: track defined.') # noqa: T201
def set_measure_height(self, measure_info):
- """set_measure_height: defining the height for calculating wind speed"""
+ """set_measure_height: defining the height for calculating wind speed""" # noqa: D400
try:
self.zp = np.arange(
measure_info[0], measure_info[2] + measure_info[1], measure_info[1]
).tolist()
- print('WindFieldSimulation: measurement height defined.')
- except:
- print('WindFieldSimulation: input format error in set_measure_height.')
+ print('WindFieldSimulation: measurement height defined.') # noqa: T201
+ except: # noqa: E722
+ print('WindFieldSimulation: input format error in set_measure_height.') # noqa: T201
def add_stations(self, station_list):
"""add_stations: adding stations to the model
@@ -264,9 +264,9 @@ def add_stations(self, station_list):
- station_list['Latitude']: latitude values of stations
- station_list['Longitude']: longitude values of stations
- station_list['z0']: surface roughness (optional)
- """
+ """ # noqa: D205, D400
# z0 default
- if 'z0' not in station_list.keys():
+ if 'z0' not in station_list.keys(): # noqa: SIM118
# default value = 0 (no specified z0)
station_list['z0'] = np.zeros(len(station_list['Latitude']))
@@ -285,10 +285,10 @@ def add_stations(self, station_list):
self.station_num += 1
def __calculate_heading(self):
- """__calculate_heading: computing the heading path"""
+ """__calculate_heading: computing the heading path""" # noqa: D400
self.beta_c = np.zeros(len(self.track_lat_m))
for i in range(len(self.track_lat_m) - 1):
- Delta = self.track_lon_m[i + 1] - self.track_lon_m[i] + self.EPS**2
+ Delta = self.track_lon_m[i + 1] - self.track_lon_m[i] + self.EPS**2 # noqa: N806
self.beta_c[i] = (
-self.delta_path[2]
+ 90.0
@@ -309,8 +309,8 @@ def __calculate_heading(self):
self.beta_c[-1] = self.beta_c[-2]
def compute_wind_field(self):
- """compute_wind_field: computing the peak wind speed (10-min gust duraiton)"""
- print('WindFieldSimulation: running linear analytical model.')
+ """compute_wind_field: computing the peak wind speed (10-min gust duraiton)""" # noqa: D400
+ print('WindFieldSimulation: running linear analytical model.') # noqa: T201
# checking if all parameters are defined
# calculating heading
@@ -335,13 +335,13 @@ def compute_wind_field(self):
f = 2.0 * omega * np.sin(lat * np.pi / 180.0)
# looping over different polar coordinates theta
for j in range(len(self.theta)):
- Ctheta = -self.cyclone_sped * np.sin(
+ Ctheta = -self.cyclone_sped * np.sin( # noqa: N806
(self.theta[j] - beta) / self.RA
)
- if (self.theta[j] >= 0) and (self.theta[j] <= 90):
- THETA = 90.0 - self.theta[j]
+ if (self.theta[j] >= 0) and (self.theta[j] <= 90): # noqa: PLR2004
+ THETA = 90.0 - self.theta[j] # noqa: N806
else:
- THETA = 450 - self.theta[j]
+ THETA = 450 - self.theta[j] # noqa: N806
lat_t = self.RA * np.arcsin(
np.sin(lat / self.RA) * np.cos(self.r / self.R)
@@ -360,11 +360,11 @@ def compute_wind_field(self):
z0[k] = self.__interp_z0(lat_t[k], lon_t[k])
# configuring coefficients
z10 = 10.0
- A = 11.4
+ A = 11.4 # noqa: N806
h = A * z0**0.86
d = 0.75 * h
kappa = 0.40
- Cd = kappa**2 / (np.log((z10 + h - d) / z0)) ** 2
+ Cd = kappa**2 / (np.log((z10 + h - d) / z0)) ** 2 # noqa: N806
der_p = (
self.Holland_B
* self.cyclone_radm**self.Holland_B
@@ -407,38 +407,38 @@ def compute_wind_field(self):
(0.5 * (Ctheta - f * self.r)) ** 2.0
+ (self.r / self.AIR_DENSITY) * der_p
) ** (-0.5)
- BB = 1.0 / (2.0 * self.EDDY_VISCOCITY * self.r) * der_vg1_theta
- Eta = (
+ BB = 1.0 / (2.0 * self.EDDY_VISCOCITY * self.r) * der_vg1_theta # noqa: N806
+ Eta = ( # noqa: N806
(0.5 * (Ctheta - f * self.r)) ** 2.0
+ (self.r / self.AIR_DENSITY) * der_p
) ** 0.5
- ALPHA = (
+ ALPHA = ( # noqa: N806
1.0
/ (2.0 * self.EDDY_VISCOCITY)
* (f + 2.0 * vg1[j, :] / self.r)
)
- BETA = (
+ BETA = ( # noqa: N806
1.0
/ (2.0 * self.EDDY_VISCOCITY)
* (f + vg1[j, :] / self.r + der_vg1_r)
)
- GAMMA = 1.0 / (2.0 * self.EDDY_VISCOCITY) * vg1[j, :] / self.r
- ALPHA = np.array(
+ GAMMA = 1.0 / (2.0 * self.EDDY_VISCOCITY) * vg1[j, :] / self.r # noqa: N806
+ ALPHA = np.array( # noqa: N806
[complex(x, y) for x, y in zip(np.real(ALPHA), np.imag(ALPHA))]
)
- BETA = np.array(
+ BETA = np.array( # noqa: N806
[complex(x, y) for x, y in zip(np.real(BETA), np.imag(BETA))]
)
- XXX = -((ALPHA * BETA) ** 0.25)
- YYY = -((ALPHA * BETA) ** 0.25)
- PP_zero = np.array([complex(x, y) for x, y in zip(XXX, YYY)])
- PP_one = -complex(1, 1) * (
+ XXX = -((ALPHA * BETA) ** 0.25) # noqa: N806
+ YYY = -((ALPHA * BETA) ** 0.25) # noqa: N806
+ PP_zero = np.array([complex(x, y) for x, y in zip(XXX, YYY)]) # noqa: N806
+ PP_one = -complex(1, 1) * ( # noqa: N806
(GAMMA + np.sqrt(ALPHA * BETA) - BB) ** 0.5
)
- PP_minus_one = -complex(1, 1) * (
+ PP_minus_one = -complex(1, 1) * ( # noqa: N806
(-GAMMA + np.sqrt(ALPHA * BETA) - BB) ** 0.5
)
- X1 = (
+ X1 = ( # noqa: N806
PP_zero
+ f * self.r * Cd / self.EDDY_VISCOCITY
- 2.0 * Eta * Cd / self.EDDY_VISCOCITY
@@ -458,7 +458,7 @@ def compute_wind_field(self):
)
)
- X2 = (
+ X2 = ( # noqa: N806
-np.conj(PP_zero)
- f * self.r * Cd / self.EDDY_VISCOCITY
+ 2.0 * Eta * Cd / self.EDDY_VISCOCITY
@@ -478,14 +478,14 @@ def compute_wind_field(self):
)
)
- X3 = (
+ X3 = ( # noqa: N806
complex(0, -2)
* Cd
/ self.EDDY_VISCOCITY
* (Eta - f * self.r / 2.0) ** 2.0
)
- X4 = -(
+ X4 = -( # noqa: N806
-PP_zero
- f * self.r * Cd / (2.0 * self.EDDY_VISCOCITY)
+ Eta * Cd / self.EDDY_VISCOCITY
@@ -495,8 +495,8 @@ def compute_wind_field(self):
+ Eta * Cd / self.EDDY_VISCOCITY
)
- A_zero = -X3 / (X1 + X2 * X4)
- A_one = (
+ A_zero = -X3 / (X1 + X2 * X4) # noqa: N806
+ A_one = ( # noqa: N806
complex(0, 1)
* self.cyclone_sped
* Cd
@@ -504,7 +504,7 @@ def compute_wind_field(self):
/ (4.0 * self.EDDY_VISCOCITY * (PP_one - np.conj(PP_minus_one)))
* (A_zero + np.conj(A_zero))
)
- A_minus_one = -np.conj(A_one)
+ A_minus_one = -np.conj(A_one) # noqa: N806
# looping over different heights zp
for ii in range(len(self.zp)):
u_zero = np.sqrt(ALPHA / BETA) * np.real(
@@ -553,7 +553,7 @@ def compute_wind_field(self):
v1 = v
for m in range(v.shape[2]):
v1[:, :, m] = v1[:, :, m] + vg1
- U = (v1**2.0 + u**2.0) ** 0.5
+ U = (v1**2.0 + u**2.0) ** 0.5 # noqa: N806
# mapping to stations
dd = (
@@ -569,7 +569,7 @@ def compute_wind_field(self):
/ self.RA
* 1000.0
)
- Delta = np.abs(np.array(station_lon)) - lon + self.EPS**2.0
+ Delta = np.abs(np.array(station_lon)) - lon + self.EPS**2.0 # noqa: N806
bearing = 90.0 + self.RA * np.arctan2(
np.sin(Delta / self.RA) * np.cos(np.array(station_lat) / self.RA),
np.cos(lat / self.RA) * np.sin(np.array(station_lat) / self.RA)
@@ -590,9 +590,9 @@ def compute_wind_field(self):
# copying results
self.station['PWS']['height'] = self.zp
self.station['PWS']['windspeed'] = station_umax.tolist()
- print('WindFieldSimulation: linear analytical simulation completed.')
+ print('WindFieldSimulation: linear analytical simulation completed.') # noqa: T201
def get_station_data(self):
- """get_station_data: returning station data"""
+ """get_station_data: returning station data""" # noqa: D400
# return station dictionary
return self.station
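set_track_mesh re-grids the track latitudes with np.arange and recovers the longitudes by linear interpolation, taking the absolute value as in the hunks above. A self-contained sketch with made-up track coordinates; note that np.interp needs the latitude sequence to be increasing:

import numpy as np

track_lat = [22.0, 24.5, 27.0, 30.0]      # illustrative storm track (degrees)
track_lon = [-80.0, -81.2, -82.5, -83.0]

lat0, dlat, lat1 = 22.0, 0.5, 30.0        # mesh_lat[0], mesh_lat[1], mesh_lat[2]
track_lat_m = np.arange(lat0, lat1 + dlat, dlat)
track_lon_m = np.abs(np.interp(track_lat_m, track_lat, track_lon))
print(track_lat_m[:3], track_lon_m[:3])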
diff --git a/modules/performRegionalEventSimulation/siteResponse/RegionalSiteResponse.py b/modules/performRegionalEventSimulation/siteResponse/RegionalSiteResponse.py
index f45a76b79..c30b4434e 100644
--- a/modules/performRegionalEventSimulation/siteResponse/RegionalSiteResponse.py
+++ b/modules/performRegionalEventSimulation/siteResponse/RegionalSiteResponse.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of the SimCenter Backend Applications
@@ -48,68 +48,68 @@
# some filePath and python exe stuff
#
-thisDir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
-mainDir = thisDir.parents[1]
-mainDir = thisDir.parents[1]
-currentDir = os.getcwd()
+thisDir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120, N816
+mainDir = thisDir.parents[1] # noqa: N816
+currentDir = os.getcwd() # noqa: PTH109, N816
-pythonEXE = sys.executable
+pythonEXE = sys.executable # noqa: N816
-thisDir = str(thisDir)
-mainDir = str(mainDir)
-currentDir = str(currentDir)
+thisDir = str(thisDir) # noqa: N816
+mainDir = str(mainDir) # noqa: N816
+currentDir = str(currentDir) # noqa: N816
-print(f'thisDir: {thisDir}')
-print(f'mainDir: {mainDir}')
-print(f'currentDir: {currentDir}')
+print(f'thisDir: {thisDir}') # noqa: T201
+print(f'mainDir: {mainDir}') # noqa: T201
+print(f'currentDir: {currentDir}') # noqa: T201
-def runHazardSimulation(inputFILE):
+def runHazardSimulation(inputFILE): # noqa: N802, N803, D103
# log_msg('Starting simulation script...')
- sys.path.insert(0, os.getcwd())
+ sys.path.insert(0, os.getcwd()) # noqa: PTH109
#
# open input & parse json
#
- print(f'inputFILE: {inputFILE}')
- with open(inputFILE) as f:
- inputJSON = json.load(f)
+ print(f'inputFILE: {inputFILE}') # noqa: T201
+ with open(inputFILE) as f: # noqa: PTH123
+ inputJSON = json.load(f) # noqa: N806
#
# read needed input data
#
- unitData = inputJSON['units']
- inputApplications = inputJSON['Applications']
- hazardApplication = inputApplications['Hazard']
- regionalMappingApplication = inputApplications['RegionalMapping']
- uqApplication = inputApplications['UQ']
+ unitData = inputJSON['units'] # noqa: N806
+ inputApplications = inputJSON['Applications'] # noqa: N806
+ hazardApplication = inputApplications['Hazard'] # noqa: N806
+ regionalMappingApplication = inputApplications['RegionalMapping'] # noqa: N806
+ uqApplication = inputApplications['UQ'] # noqa: N806
- hazardAppData = hazardApplication['ApplicationData']
+ hazardAppData = hazardApplication['ApplicationData'] # noqa: N806
- soilFile = hazardAppData['soilGridParametersFile']
- soilPath = hazardAppData['soilParametersPath']
- responseScript = hazardAppData['siteResponseScript']
- scriptPath = hazardAppData['scriptPath']
+ soilFile = hazardAppData['soilGridParametersFile'] # noqa: N806
+ soilPath = hazardAppData['soilParametersPath'] # noqa: N806
+ responseScript = hazardAppData['siteResponseScript'] # noqa: N806
+ scriptPath = hazardAppData['scriptPath'] # noqa: N806
filters = hazardAppData['filter']
- eventFile = hazardAppData['inputEventFile']
- motionDir = hazardAppData['inputMotionDir']
- outputDir = hazardAppData['outputMotionDir']
+ eventFile = hazardAppData['inputEventFile'] # noqa: N806
+ motionDir = hazardAppData['inputMotionDir'] # noqa: N806
+ outputDir = hazardAppData['outputMotionDir'] # noqa: N806
# now create an input for siteResponseWHALE
- srtFILE = 'sc_srt.json'
+ srtFILE = 'sc_srt.json' # noqa: N806
- outputs = dict(EDP=True, DM=False, DV=False, every_realization=False)
+ outputs = dict(EDP=True, DM=False, DV=False, every_realization=False) # noqa: C408
- edpApplication = dict(Application='DummyEDP', ApplicationData=dict())
+ edpApplication = dict(Application='DummyEDP', ApplicationData=dict()) # noqa: C408, N806
- eventApp = dict(
+ eventApp = dict( # noqa: C408, N806
EventClassification='Earthquake',
Application='RegionalSiteResponse',
- ApplicationData=dict(
+ ApplicationData=dict( # noqa: C408
pathEventData=motionDir,
mainScript=responseScript,
modelPath=scriptPath,
@@ -117,17 +117,17 @@ def runHazardSimulation(inputFILE):
),
)
- regionalMappingAppData = regionalMappingApplication['ApplicationData']
+ regionalMappingAppData = regionalMappingApplication['ApplicationData'] # noqa: N806
regionalMappingAppData['filenameEVENTgrid'] = eventFile
- buildingApplication = dict(
+ buildingApplication = dict( # noqa: C408, N806
Application='CSV_to_BIM',
- ApplicationData=dict(
+ ApplicationData=dict( # noqa: C408
buildingSourceFile=f'{soilPath}{soilFile}', filter=filters
),
)
- Applications = dict(
+ Applications = dict( # noqa: C408, N806
UQ=uqApplication,
RegionalMapping=regionalMappingApplication,
Events=[eventApp],
@@ -135,23 +135,23 @@ def runHazardSimulation(inputFILE):
Building=buildingApplication,
)
- srt = dict(units=unitData, outputs=outputs, Applications=Applications)
+ srt = dict(units=unitData, outputs=outputs, Applications=Applications) # noqa: C408
- with open(srtFILE, 'w') as f:
+ with open(srtFILE, 'w') as f: # noqa: PTH123
json.dump(srt, f, indent=2)
#
# now invoke siteResponseWHALE
#
- inputDir = currentDir + '/input_data'
- tmpDir = currentDir + '/input_data/siteResponseRunningDir'
+ inputDir = currentDir + '/input_data' # noqa: N806
+ tmpDir = currentDir + '/input_data/siteResponseRunningDir' # noqa: N806
- print(
+ print( # noqa: T201
f'RUNNING {pythonEXE} {mainDir}/Workflow/siteResponseWHALE.py ./sc_srt.json --registry {mainDir}/Workflow/WorkflowApplications.json --referenceDir {inputDir} -w {tmpDir}'
)
- subprocess.run(
+ subprocess.run( # noqa: S603
[
pythonEXE,
mainDir + '/Workflow/siteResponseWHALE.py',
@@ -171,12 +171,12 @@ def runHazardSimulation(inputFILE):
# and moving all the motions created
#
- outputMotionDir = currentDir + '/input_data/' + outputDir
- print(
+ outputMotionDir = currentDir + '/input_data/' + outputDir # noqa: N806
+ print( # noqa: T201
f'RUNNING {pythonEXE} {mainDir}/createEVENT/siteResponse/createGM4BIM.py -i {tmpDir} -o {outputMotionDir} --removeInput'
)
- subprocess.run(
+ subprocess.run( # noqa: S603
[
pythonEXE,
mainDir + '/createEVENT/siteResponse/createGM4BIM.py',
@@ -197,16 +197,16 @@ def runHazardSimulation(inputFILE):
try:
shutil.rmtree(tmpDir)
except OSError as e:
- print('Error: %s : %s' % (tmpDir, e.strerror))
+ print('Error: %s : %s' % (tmpDir, e.strerror)) # noqa: T201, UP031
#
# modify inputFILE to provide new event file for regional mapping
#
- regionalMappingAppData = regionalMappingApplication['ApplicationData']
+ regionalMappingAppData = regionalMappingApplication['ApplicationData'] # noqa: N806
regionalMappingAppData['filenameEVENTgrid'] = f'{outputDir}/EventGrid.csv'
- with open(inputFILE, 'w') as f:
+ with open(inputFILE, 'w') as f: # noqa: PTH123
json.dump(inputJSON, f, indent=2)
#
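Both subprocess.run calls above pass the command as an argument list, so no shell ever parses the string and S603 flags a pattern rather than an actual injection risk here. A small standalone sketch of the same pattern:

import subprocess
import sys

result = subprocess.run(
    [sys.executable, '-c', 'print("hello from a child process")'],
    capture_output=True,
    text=True,
    check=True,  # raise CalledProcessError on a nonzero exit code
)
print(result.stdout.strip())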
diff --git a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
index 5d63495f7..a297e191d 100644
--- a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
+++ b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -48,19 +48,19 @@
from sklearn.neighbors import NearestNeighbors
-def find_neighbors(
+def find_neighbors( # noqa: C901, D103
asset_file,
event_grid_file,
samples,
neighbors,
filter_label,
seed,
- doParallel,
+ doParallel, # noqa: N803
):
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -68,15 +68,15 @@ def find_neighbors(
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
# read the event grid data file
event_grid_path = Path(event_grid_file).resolve()
@@ -86,9 +86,9 @@ def find_neighbors(
grid_df = pd.read_csv(event_dir / event_grid_file, header=0)
# store the locations of the grid points in X
- lat_E = grid_df['Latitude']
- lon_E = grid_df['Longitude']
- X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)])
+ lat_E = grid_df['Latitude'] # noqa: N806
+ lon_E = grid_df['Longitude'] # noqa: N806
+ X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)]) # noqa: N806
if filter_label == '':
grid_extra_keys = list(
@@ -105,18 +105,18 @@ def find_neighbors(
)
# load the building data file
- with open(asset_file, encoding='utf-8') as f:
+ with open(asset_file, encoding='utf-8') as f: # noqa: PTH123
asset_dict = json.load(f)
# prepare a dataframe that holds asset filenames and locations
- AIM_df = pd.DataFrame(
+ AIM_df = pd.DataFrame( # noqa: N806
columns=['Latitude', 'Longitude', 'file'], index=np.arange(len(asset_dict))
)
count = 0
for i, asset in enumerate(asset_dict):
- if runParallel == False or (i % numP) == procID:
- with open(asset['file'], encoding='utf-8') as f:
+ if runParallel == False or (i % numP) == procID: # noqa: E712
+ with open(asset['file'], encoding='utf-8') as f: # noqa: PTH123
asset_data = json.load(f)
asset_loc = asset_data['GeneralInformation']['location']
@@ -126,7 +126,7 @@ def find_neighbors(
count = count + 1
# store building locations in Y
- Y = np.array(
+ Y = np.array( # noqa: N806
[
[lo, la]
for lo, la in zip(AIM_df['Longitude'], AIM_df['Latitude'])
@@ -147,13 +147,13 @@ def find_neighbors(
count = 0
# iterate through the buildings and store the selected events in the AIM
- for asset_i, (AIM_id, dist_list, ind_list) in enumerate(
+ for asset_i, (AIM_id, dist_list, ind_list) in enumerate( # noqa: B007, N806
zip(AIM_df.index, distances, indices)
):
# open the AIM file
asst_file = AIM_df.iloc[AIM_id]['file']
- with open(asst_file, encoding='utf-8') as f:
+ with open(asst_file, encoding='utf-8') as f: # noqa: PTH123
asset_data = json.load(f)
if filter_label != '':
@@ -164,8 +164,8 @@ def find_neighbors(
# only keep the distances and indices corresponding to neighbors
# with the same soil type
- dist_list = dist_list[(grid_label == asset_label).values]
- ind_list = ind_list[(grid_label == asset_label).values]
+ dist_list = dist_list[(grid_label == asset_label).values] # noqa: PD011, PLW2901
+ ind_list = ind_list[(grid_label == asset_label).values] # noqa: PD011, PLW2901
# return dist_list & ind_list with a length equals neighbors
# assuming that at least neighbors grid points exist with
@@ -173,26 +173,26 @@ def find_neighbors(
# because dist_list, ind_list sorted initially in order of increasing
# distance, just take the first neighbors grid points of each
- dist_list = dist_list[:neighbors]
- ind_list = ind_list[:neighbors]
+ dist_list = dist_list[:neighbors] # noqa: PLW2901
+ ind_list = ind_list[:neighbors] # noqa: PLW2901
if len(grid_extra_keys) > 0:
filter_labels = []
- for key in asset_data['GeneralInformation'].keys():
+ for key in asset_data['GeneralInformation'].keys(): # noqa: SIM118
if key in grid_extra_keys:
- filter_labels.append(key)
+ filter_labels.append(key) # noqa: PERF401
filter_list = [True for i in dist_list]
- for filter_label in filter_labels:
+ for filter_label in filter_labels: # noqa: PLR1704
asset_label = asset_data['GeneralInformation'][filter_label]
grid_label = grid_df[filter_label][ind_list]
- filter_list_i = (grid_label == asset_label).values
+ filter_list_i = (grid_label == asset_label).values # noqa: PD011
filter_list = filter_list and filter_list_i
# only keep the distances and indices corresponding to neighbors
# with the same soil type
- dist_list = dist_list[filter_list]
- ind_list = ind_list[filter_list]
+ dist_list = dist_list[filter_list] # noqa: PLW2901
+ ind_list = ind_list[filter_list] # noqa: PLW2901
# return dist_list & ind_list with a length equals neighbors
# assuming that at least neighbors grid points exist with
@@ -200,11 +200,11 @@ def find_neighbors(
# because dist_list, ind_list sorted initially in order of increasing
# distance, just take the first neighbors grid points of each
- dist_list = dist_list[:neighbors]
- ind_list = ind_list[:neighbors]
+ dist_list = dist_list[:neighbors] # noqa: PLW2901
+ ind_list = ind_list[:neighbors] # noqa: PLW2901
# calculate the weights for each neighbor based on their distance
- dist_list = 1.0 / (dist_list**2.0)
+ dist_list = 1.0 / (dist_list**2.0) # noqa: PLW2901
weights = np.array(dist_list) / np.sum(dist_list)
# get the pre-defined number of samples for each neighbor
@@ -268,7 +268,7 @@ def find_neighbors(
# IM collections are not scaled
scale_list.append(1.0)
- # TODO: update the LLNL input data and remove this clause
+ # TODO: update the LLNL input data and remove this clause # noqa: TD002
else:
event_list = []
for e, i in zip(nbr_samples, ind_list):
@@ -290,7 +290,7 @@ def find_neighbors(
event_list_json.append([f'{event}x{e_i:05d}', scale_list[e_i]])
# save the event dictionary to the AIM
- # TODO: we assume there is only one event
+ # TODO: we assume there is only one event # noqa: TD002
# handling multiple events will require more sophisticated inputs
if 'Events' not in asset_data:
@@ -308,7 +308,7 @@ def find_neighbors(
}
)
- with open(asst_file, 'w', encoding='utf-8') as f:
+ with open(asst_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(asset_data, f, indent=2)
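find_neighbors leans on sklearn's NearestNeighbors and then turns distances into inverse-square weights. A compact sketch with invented coordinates; two grid points per query stand in for the neighbors argument:

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[-122.0, 37.0], [-122.1, 37.1], [-122.2, 37.2]])  # grid points
Y = np.array([[-122.05, 37.05]])                                # one asset

nbrs = NearestNeighbors(n_neighbors=2).fit(X)
distances, indices = nbrs.kneighbors(Y)

dist_list = 1.0 / (distances[0] ** 2.0)   # inverse-square, as in the hunk above
weights = np.array(dist_list) / np.sum(dist_list)
print(indices[0], weights)                # weights sum to 1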
diff --git a/modules/performRegionalMapping/SiteSpecifiedEvents/SSE.py b/modules/performRegionalMapping/SiteSpecifiedEvents/SSE.py
index b4070d1ac..4488e680b 100644
--- a/modules/performRegionalMapping/SiteSpecifiedEvents/SSE.py
+++ b/modules/performRegionalMapping/SiteSpecifiedEvents/SSE.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -50,11 +50,11 @@
from scipy.cluster.vq import vq
-def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
+def create_event(asset_file, event_grid_file, multipleEvents, doParallel): # noqa: C901, N803, D103
# check if running parallel
- numP = 1
- procID = 0
- runParallel = False
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
+ runParallel = False # noqa: N806
if doParallel == 'True':
mpi_spec = importlib.util.find_spec('mpi4py')
@@ -62,15 +62,15 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
if found:
from mpi4py import MPI
- runParallel = True
+ runParallel = True # noqa: N806
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = 'False'
- runParallel = False
- numP = 1
- procID = 0
+ numP = comm.Get_size() # noqa: N806
+ procID = comm.Get_rank() # noqa: N806
+ if numP < 2: # noqa: PLR2004
+ doParallel = 'False' # noqa: N806
+ runParallel = False # noqa: N806
+ numP = 1 # noqa: N806
+ procID = 0 # noqa: N806
# read the event grid data file
event_grid_path = Path(event_grid_file).resolve()
@@ -80,23 +80,23 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
grid_df = pd.read_csv(event_dir / event_grid_file, header=0)
# store the locations of the grid points in X
- lat_E = grid_df['Latitude']
- lon_E = grid_df['Longitude']
- X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)])
+ lat_E = grid_df['Latitude'] # noqa: N806
+ lon_E = grid_df['Longitude'] # noqa: N806
+ X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)]) # noqa: N806
# load the asset data file
- with open(asset_file, encoding='utf-8') as f:
+ with open(asset_file, encoding='utf-8') as f: # noqa: PTH123
asset_dict = json.load(f)
# prepare a dataframe that holds asset filenames and locations
- AIM_df = pd.DataFrame(
+ AIM_df = pd.DataFrame( # noqa: N806
columns=['Latitude', 'Longitude', 'file'], index=np.arange(len(asset_dict))
)
count = 0
for i, asset in enumerate(asset_dict):
- if runParallel == False or (i % numP) == procID:
- with open(asset['file'], encoding='utf-8') as f:
+ if runParallel == False or (i % numP) == procID: # noqa: E712
+ with open(asset['file'], encoding='utf-8') as f: # noqa: PTH123
asset_data = json.load(f)
asset_loc = asset_data['GeneralInformation']['location']
@@ -106,7 +106,7 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
count = count + 1
# store asset locations in Y
- Y = np.array(
+ Y = np.array( # noqa: N806
[
[lo, la]
for lo, la in zip(AIM_df['Longitude'], AIM_df['Latitude'])
@@ -129,10 +129,10 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
# check to ensure we found all of the assets
if len(closest) != np.size(Y, 0):
- print(
+ print( # noqa: T201
'Error: the number of assets must equal the number of grid points'
)
- print(
+ print( # noqa: T201
'The number of assets is '
+ str(np.size(Y, 0))
+ ' and the number of grid points is '
@@ -141,11 +141,11 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
return 1
# iterate through the assets and store the selected events in the AIM
- for idx, AIM_id in enumerate(AIM_df.index):
+ for idx, AIM_id in enumerate(AIM_df.index): # noqa: RET503, N806
# open the AIM file
asset_file = AIM_df.iloc[AIM_id]['file']
- with open(asset_file, encoding='utf-8') as f:
+ with open(asset_file, encoding='utf-8') as f: # noqa: PTH123
asset_data = json.load(f)
# this is the preferred behavior, the else clause is left for legacy inputs
@@ -167,13 +167,13 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
else:
event_type = 'intensityMeasure'
- event_count = first_file.shape[0]
+ event_count = first_file.shape[0] # noqa: F841
# collect the list of events and scale factors
event_list = []
scale_list = []
- closestPnt = grid_df.iloc[closest[idx]]
+ closestPnt = grid_df.iloc[closest[idx]] # noqa: N806
# if the grid has ground motion records...
if event_type == 'timeHistory':
@@ -214,17 +214,17 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
# If GP_file contains multiple events
if multipleEvents:
# Read the GP_file
- GP_file = os.path.join(event_dir, closestPnt['GP_file'])
- GP_file_df = pd.read_csv(GP_file, header=0)
+ GP_file = os.path.join(event_dir, closestPnt['GP_file']) # noqa: PTH118, N806
+ GP_file_df = pd.read_csv(GP_file, header=0) # noqa: N806
if GP_file_df.shape[0] > 1:
for row in range(1, GP_file_df.shape[0]):
event_list.append(closestPnt['GP_file'] + f'x{row}')
scale_list.append(1.0)
- # TODO: update the LLNL input data and remove this clause
+ # TODO: update the LLNL input data and remove this clause # noqa: TD002
else:
event_list = []
- for e, i in zip(nbr_samples, ind_list):
+ for e, i in zip(nbr_samples, ind_list): # noqa: B007, F821
event_list += [
closestPnt['GP_file'],
] * e
@@ -254,7 +254,7 @@ def create_event(asset_file, event_grid_file, multipleEvents, doParallel):
# "type": "SimCenterEvents"
}
- with open(asset_file, 'w', encoding='utf-8') as f:
+ with open(asset_file, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(asset_data, f, indent=2)
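create_event assigns every asset to its nearest grid point with scipy's vector-quantization routine and then checks that the assignment covered all assets. A sketch with made-up coordinates:

import numpy as np
from scipy.cluster.vq import vq

X = np.array([[-122.0, 37.0], [-121.0, 38.0]])  # event grid points
Y = np.array([[-121.9, 37.1], [-121.1, 37.9]])  # asset locations

closest, dist = vq(Y, X)                 # index of the nearest grid point per asset
assert len(closest) == np.size(Y, 0)     # the consistency check done above
print(closest, dist)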
diff --git a/modules/performSIMULATION/IMasEDP/IMasEDP.py b/modules/performSIMULATION/IMasEDP/IMasEDP.py
index 7fdd9f5bb..bee9bd787 100644
--- a/modules/performSIMULATION/IMasEDP/IMasEDP.py
+++ b/modules/performSIMULATION/IMasEDP/IMasEDP.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -45,10 +45,10 @@
import numpy as np
-def write_RV(EVENT_input_path):
+def write_RV(EVENT_input_path): # noqa: C901, N802, N803, D103
# open the event file and get the list of events
- with open(EVENT_input_path, encoding='utf-8') as f:
- EVENT_in = json.load(f)
+ with open(EVENT_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EVENT_in = json.load(f) # noqa: N806
# if there is a list of possible events, load all of them
if len(EVENT_in['randomVariables']) > 0:
@@ -73,7 +73,7 @@ def write_RV(EVENT_input_path):
file_sample_dict[filename][0].append(e_i)
file_sample_dict[filename][1].append(int(sample_id))
- EDP_output = None
+ EDP_output = None # noqa: N806
for filename in file_sample_dict:
# get the header
@@ -101,29 +101,29 @@ def write_RV(EVENT_input_path):
if EDP_output is None:
if len(samples.shape) > 1:
- EDP_output = np.zeros((len(event_list), samples.shape[1]))
+ EDP_output = np.zeros((len(event_list), samples.shape[1])) # noqa: N806
else:
- EDP_output = np.zeros(len(event_list))
+ EDP_output = np.zeros(len(event_list)) # noqa: N806
EDP_output[file_sample_dict[filename][0]] = samples
if len(EDP_output.shape) == 1:
- EDP_output = np.reshape(EDP_output, (EDP_output.shape[0], 1))
+ EDP_output = np.reshape(EDP_output, (EDP_output.shape[0], 1)) # noqa: N806
- EDP_output = EDP_output.T
+ EDP_output = EDP_output.T # noqa: N806
for c_i, col in enumerate(header):
f_i = f_scale.get(col.strip(), f_scale.get('ALL', None))
if f_i is None:
- raise ValueError(f'No units defined for {col}')
+ raise ValueError(f'No units defined for {col}') # noqa: EM102, TRY003
EDP_output[c_i] *= f_i
- EDP_output = EDP_output.T
+ EDP_output = EDP_output.T # noqa: N806
index = np.reshape(np.arange(EDP_output.shape[0]), (EDP_output.shape[0], 1))
- EDP_output = np.concatenate([index, EDP_output], axis=1)
+ EDP_output = np.concatenate([index, EDP_output], axis=1) # noqa: N806
working_dir = Path(PurePath(EVENT_input_path).parent)
# working_dir = posixpath.dirname(EVENT_input_path)
@@ -132,7 +132,7 @@ def write_RV(EVENT_input_path):
header_out = []
for h_label in header:
# remove leading and trailing whitespace
- h_label = h_label.strip()
+ h_label = h_label.strip() # noqa: PLW2901
# convert suffixes to the loc-dir format used by the SimCenter
if h_label.endswith('_h'): # horizontal
@@ -159,24 +159,24 @@ def write_RV(EVENT_input_path):
)
-# TODO: consider removing this function
+# TODO: consider removing this function # noqa: TD002
# It is not used currently
-def create_EDP(EVENT_input_path, EDP_input_path):
+def create_EDP(EVENT_input_path, EDP_input_path): # noqa: N802, N803, D103
# load the EDP file
- with open(EDP_input_path, encoding='utf-8') as f:
- EDP_in = json.load(f)
+ with open(EDP_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EDP_in = json.load(f) # noqa: N806
# load the EVENT file
- with open(EVENT_input_path, encoding='utf-8') as f:
- EVENT_in = json.load(f)
+ with open(EVENT_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EVENT_in = json.load(f) # noqa: N806
# store the IM(s) in the EDP file
for edp in EDP_in['EngineeringDemandParameters'][0]['responses']:
for im in EVENT_in['Events']:
- if edp['type'] in im.keys():
+ if edp['type'] in im.keys(): # noqa: SIM118
edp['scalar_data'] = [im[edp['type']]]
- with open(EDP_input_path, 'w', encoding='utf-8') as f:
+ with open(EDP_input_path, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(EDP_in, f, indent=2)
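write_RV scales EDP columns in a transposed view so each header entry can pick its own unit factor, with 'ALL' as the fallback key. A sketch of that loop with hypothetical units; the headers, the 9.81 factor, and the sample values are illustrative, not taken from the module:

import numpy as np

header = ['PGA', 'PGV']
f_scale = {'PGA': 9.81, 'ALL': 1.0}                 # made-up unit factors
EDP_output = np.array([[0.5, 30.0], [0.8, 45.0]])   # rows: events, cols: header

EDP_output = EDP_output.T                 # column-major, as in write_RV
for c_i, col in enumerate(header):
    f_i = f_scale.get(col.strip(), f_scale.get('ALL', None))
    if f_i is None:
        raise ValueError(f'No units defined for {col}')
    EDP_output[c_i] *= f_i
EDP_output = EDP_output.T
print(EDP_output)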
diff --git a/modules/performSIMULATION/customPy/customPySimulation.py b/modules/performSIMULATION/customPy/customPySimulation.py
index 0d5206df3..b776d3ead 100644
--- a/modules/performSIMULATION/customPy/customPySimulation.py
+++ b/modules/performSIMULATION/customPy/customPySimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2022 Leland Stanford Junior University
# Copyright (c) 2022 The Regents of the University of California
#
@@ -46,14 +46,14 @@
from pathlib import Path
# import the common constants and methods
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import *
+from simcenter_common import * # noqa: E402, F403
-convert_EDP = {
+convert_EDP = { # noqa: N816
'max_abs_acceleration': 'PFA',
'max_rel_disp': 'PFD',
'max_drift': 'PID',
@@ -63,44 +63,44 @@
}
-def write_RV():
+def write_RV(): # noqa: N802, D103
# create an empty SIM file
- SIM = {}
+ SIM = {} # noqa: N806
- with open('SIM.json', 'w', encoding='utf-8') as f:
+ with open('SIM.json', 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(SIM, f, indent=2)
- # TODO: check simulation data exists and contains all important fields
- # TODO: get simulation data & write to SIM file
+ # TODO: check simulation data exists and contains all important fields # noqa: TD002
+ # TODO: get simulation data & write to SIM file # noqa: TD002
-def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_path):
+def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_path): # noqa: C901, N803, D103
# these imports are here to save time when the app is called without
# the -getRV flag
import sys
- log_msg('Startring simulation script...')
+ log_msg('Starting simulation script...') # noqa: F405
- working_dir = os.getcwd()
+ working_dir = os.getcwd() # noqa: PTH109
- sys.path.insert(0, os.getcwd())
+ sys.path.insert(0, os.getcwd()) # noqa: PTH109
# load the AIM file
- with open(AIM_input_path, encoding='utf-8') as f:
- AIM_in = json.load(f)
+ with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ AIM_in = json.load(f) # noqa: N806
# load the SAM file
- with open(SAM_input_path, encoding='utf-8') as f:
- SAM_in = json.load(f)
+ with open(SAM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ SAM_in = json.load(f) # noqa: N806
# load the event file
- with open(EVENT_input_path, encoding='utf-8') as f:
- EVENT_in = json.load(f)['Events'][0]
+ with open(EVENT_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EVENT_in = json.load(f)['Events'][0] # noqa: N806
# load the EDP file
- with open(EDP_input_path, encoding='utf-8') as f:
- EDP_in = json.load(f)
+ with open(EDP_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EDP_in = json.load(f) # noqa: N806
# KZ: commented out --> we're running at the current workdir
# sys.path.insert(0, SAM_in['modelPath'])
@@ -111,16 +111,16 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
custom_script_path = SAM_in['mainScript']
# copy the custom scripts to the current directory if not yet
- if os.path.exists(custom_script_path):
+ if os.path.exists(custom_script_path): # noqa: PTH110
pass
else:
custom_script_dir = SAM_in.get('modelPath', None)
if custom_script_dir is None:
- log_msg('No modelPath found in the SAM file.')
+ log_msg('No modelPath found in the SAM file.') # noqa: F405
else:
- shutil.copytree(custom_script_dir, os.getcwd(), dirs_exist_ok=True)
- log_msg(
- f'Custom scripts copied from {custom_script_dir} to {os.getcwd()}'
+ shutil.copytree(custom_script_dir, os.getcwd(), dirs_exist_ok=True) # noqa: PTH109
+ log_msg( # noqa: F405
+ f'Custom scripts copied from {custom_script_dir} to {os.getcwd()}' # noqa: PTH109
)
custom_script = importlib.__import__(
@@ -136,23 +136,23 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
custom_analysis = custom_script.custom_analysis
# run the analysis
- EDP_res = custom_analysis(AIM=AIM_in, EVENT=EVENT_in, SAM=SAM_in, EDP=EDP_in)
+ EDP_res = custom_analysis(AIM=AIM_in, EVENT=EVENT_in, SAM=SAM_in, EDP=EDP_in) # noqa: N806
os.chdir(working_dir)
results_txt = ''
- EDP_list = EDP_in['EngineeringDemandParameters'][0]['responses']
+ EDP_list = EDP_in['EngineeringDemandParameters'][0]['responses'] # noqa: N806
# KZ: rewriting the parsing step of EDP_res to EDP_list
for response in EDP_list:
- print('response = ', response)
+ print('response = ', response) # noqa: T201
response['scalar_data'] = []
try:
val = EDP_res.get(response['type'], None)
- print('val = ', val)
+ print('val = ', val) # noqa: T201
if val is None:
# try conversion
edp_name = convert_EDP.get(response['type'], None)
- print('edp_name = ', edp_name)
+ print('edp_name = ', edp_name) # noqa: T201
if edp_name is not None:
if 'PID' in edp_name:
cur_floor = response['floor2']
@@ -166,12 +166,12 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
if len(dofs) == 0:
dofs = [1, 2] # default is bidirection
response['dofs'] = dofs
- print('dofs = ', dofs)
+ print('dofs = ', dofs) # noqa: T201
for cur_dof in dofs:
key_name = (
'1-' + edp_name + f'-{int(cur_floor)}-{int(cur_dof)}'
)
- print('key_name = ', key_name)
+ print('key_name = ', key_name) # noqa: T201
res = EDP_res.get(key_name, None)
if res is None:
response['scalar_data'].append('NaN')
@@ -179,14 +179,14 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
else:
response['scalar_data'].append(float(EDP_res[key_name]))
results_txt += str(float(EDP_res[key_name])) + ' '
- print('response = ', response)
+ print('response = ', response) # noqa: T201
else:
response['scalar_data'] = ['NaN']
results_txt += 'NaN '
else:
response['scalar_data'] = [float(val)]
results_txt += str(float(EDP_res[response['type']])) + ' '
- except:
+ except: # noqa: E722
response['scalar_data'] = ['NaN']
results_txt += 'NaN '
# edp = EDP_res[response['type']][response['id']]
@@ -196,10 +196,10 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
# print(response)
results_txt = results_txt[:-1]
- with open(EDP_input_path, 'w', encoding='utf-8') as f:
+ with open(EDP_input_path, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(EDP_in, f, indent=2)
- with open('results.out', 'w', encoding='utf-8') as f:
+ with open('results.out', 'w', encoding='utf-8') as f: # noqa: PTH123
f.write(results_txt)
"""
@@ -304,7 +304,7 @@ def run_simulation(EVENT_input_path, SAM_input_path, AIM_input_path, EDP_input_p
json.dump(EDP_in, f, indent=2)
"""
- log_msg('Simulation script finished.')
+ log_msg('Simulation script finished.') # noqa: F405
if __name__ == '__main__':
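The PTH-series codes suppressed throughout these hunks (PTH100, PTH109, PTH118-PTH123) all point at the same modernization: replacing os / os.path calls with pathlib. A minimal sketch of the equivalents, for reference only since this change set suppresses rather than rewrites; the EVENT.json name is a placeholder:

    import json
    from pathlib import Path

    # PTH120 + PTH100: os.path.dirname(os.path.abspath(__file__))
    this_dir = Path(__file__).resolve().parent

    # PTH109: os.getcwd()
    working_dir = Path.cwd()

    # PTH123: builtin open() -> Path.open()
    event_path = Path('EVENT.json')  # placeholder file name
    with event_path.open(encoding='utf-8') as f:
        event_in = json.load(f)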
diff --git a/modules/performSIMULATION/openSees/OpenSeesSimulation.py b/modules/performSIMULATION/openSees/OpenSeesSimulation.py
index f504ce729..72bb89e42 100644
--- a/modules/performSIMULATION/openSees/OpenSeesSimulation.py
+++ b/modules/performSIMULATION/openSees/OpenSeesSimulation.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3 # noqa: EXE001, D100
import os
import subprocess
@@ -7,18 +7,18 @@
# from pathlib import Path
-def main(args):
+def main(args): # noqa: D103
# set filenames
- aimName = args[1]
- samName = args[3]
- evtName = args[5]
- edpName = args[7]
- simName = args[9]
+ aimName = args[1] # noqa: N806
+ samName = args[3] # noqa: N806
+ evtName = args[5] # noqa: N806
+ edpName = args[7] # noqa: N806
+ simName = args[9] # noqa: N806
# remove path to AIM file, so recorders are not messed up
# .. AIM file to be read is in current dir (copy elsewhere)
- aimName = os.path.basename(aimName)
- scriptDir = os.path.dirname(os.path.realpath(__file__))
+ aimName = os.path.basename(aimName) # noqa: PTH119, N806
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
# aimName = Path(args[1]).name
# scriptDir = Path(__file__).resolve().parent
@@ -26,22 +26,22 @@ def main(args):
# If requesting random variables run getUncertainty
# Otherwise, Run Opensees
if '--getRV' in args:
- getUncertaintyCommand = f'"{scriptDir}/OpenSeesPreprocessor" {aimName} {samName} {evtName} {simName}'
- exit_code = subprocess.Popen(getUncertaintyCommand, shell=True).wait()
+ getUncertaintyCommand = f'"{scriptDir}/OpenSeesPreprocessor" {aimName} {samName} {evtName} {simName}' # noqa: N806
+ exit_code = subprocess.Popen(getUncertaintyCommand, shell=True).wait() # noqa: S602
# exit_code = subprocess.run(getUncertaintyCommand, shell=True).returncode
# if not exit_code==0:
# exit(exit_code)
else:
# Run preprocessor
- preprocessorCommand = f'"{scriptDir}/OpenSeesPreprocessor" {aimName} {samName} {evtName} {edpName} {simName} example.tcl'
- exit_code = subprocess.Popen(preprocessorCommand, shell=True).wait()
+ preprocessorCommand = f'"{scriptDir}/OpenSeesPreprocessor" {aimName} {samName} {evtName} {edpName} {simName} example.tcl' # noqa: N806
+ exit_code = subprocess.Popen(preprocessorCommand, shell=True).wait() # noqa: S602
# exit_code = subprocess.run(preprocessorCommand, shell=True).returncode # Maybe better for compatibility - jb
# if not exit_code==0:
# exit(exit_code)
# Run OpenSees
- exit_code = subprocess.Popen(
- 'OpenSees example.tcl >> workflow.err 2>&1',
+ exit_code = subprocess.Popen( # noqa: S602
+ 'OpenSees example.tcl >> workflow.err 2>&1', # noqa: S607
shell=True,
).wait()
# Maybe better for compatibility, need to doublecheck - jb
@@ -58,8 +58,8 @@ def main(args):
# exit(exit_code)
# Run postprocessor
- postprocessorCommand = f'"{scriptDir}/OpenSeesPostprocessor" {aimName} {samName} {evtName} {edpName}'
- exit_code = subprocess.Popen(postprocessorCommand, shell=True).wait()
+ postprocessorCommand = f'"{scriptDir}/OpenSeesPostprocessor" {aimName} {samName} {evtName} {edpName}' # noqa: N806
+ exit_code = subprocess.Popen(postprocessorCommand, shell=True).wait() # noqa: S602, F841
# exit_code = subprocess.run(postprocessorCommand, shell=True).returncode # Maybe better for compatibility - jb
# if not exit_code==0:
# exit(exit_code)
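S602 and S607 flag the shell=True invocations and the bare OpenSees executable name above. A sketch of the argument-list form those codes suggest, with placeholder file names standing in for the values parsed in main() and the shell redirection replaced by an explicit file handle:

    import subprocess
    from pathlib import Path

    script_dir = Path(__file__).resolve().parent
    # placeholders for the args[1..9] values parsed in main()
    aim_name, sam_name, evt_name, sim_name = 'AIM.json', 'SAM.json', 'EVENT.json', 'SIM.json'

    # S602: pass an argument list instead of a shell string
    cmd = [str(script_dir / 'OpenSeesPreprocessor'), aim_name, sam_name, evt_name, sim_name]
    exit_code = subprocess.run(cmd, check=False).returncode

    # S607 + '>> workflow.err 2>&1', replaced by an explicit append-mode handle
    with Path('workflow.err').open('a') as err:
        exit_code = subprocess.run(
            ['OpenSees', 'example.tcl'], stdout=err, stderr=err, check=False
        ).returncode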
diff --git a/modules/performSIMULATION/openSeesPy/OpenSeesPySimulation.py b/modules/performSIMULATION/openSeesPy/OpenSeesPySimulation.py
index 7689b8fff..8a4457173 100644
--- a/modules/performSIMULATION/openSeesPy/OpenSeesPySimulation.py
+++ b/modules/performSIMULATION/openSeesPy/OpenSeesPySimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -46,14 +46,14 @@
from pathlib import Path
# import the common constants and methods
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import *
+from simcenter_common import * # noqa: E402, F403
-convert_EDP = {
+convert_EDP = { # noqa: N816
'max_abs_acceleration': 'PFA',
'max_rel_disp': 'PFD',
'max_drift': 'PID',
@@ -63,14 +63,14 @@
}
-def write_RV():
+def write_RV(): # noqa: N802, D103
pass
- # TODO: check simulation data exists and contains all important fields
- # TODO: get simulation data & write to SIM file
+ # TODO: check simulation data exists and contains all important fields # noqa: TD002
+ # TODO: get simulation data & write to SIM file # noqa: TD002
-def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_path):
+def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_path): # noqa: C901, N802, N803, D103
# these imports are here to save time when the app is called without
# the -getRV flag
import sys
@@ -78,18 +78,18 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
import numpy as np
import openseespy.opensees as ops
- log_msg('Startring simulation script...')
+ log_msg('Starting simulation script...') # noqa: F405
- sys.path.insert(0, os.getcwd())
+ sys.path.insert(0, os.getcwd()) # noqa: PTH109
# load the model builder script
- with open(BIM_input_path, encoding='utf-8') as f:
- BIM_in = json.load(f)
+ with open(BIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ BIM_in = json.load(f) # noqa: N806
model_params = BIM_in['GeneralInformation']
- with open(SAM_input_path, encoding='utf-8') as f:
- SAM_in = json.load(f)
+ with open(SAM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ SAM_in = json.load(f) # noqa: N806
sys.path.insert(0, SAM_in['modelPath'])
@@ -97,7 +97,7 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
dof_map = [int(dof) for dof in SAM_in['dofMap'].split(',')]
- node_map = dict(
+ node_map = dict( # noqa: C404
[
(int(entry['floor']), int(entry['node']))
for entry in SAM_in['NodeMapping']
@@ -121,15 +121,15 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
build_model(model_params=model_params)
# load the event file
- with open(EVENT_input_path, encoding='utf-8') as f:
- EVENT_in = json.load(f)['Events'][0]
+ with open(EVENT_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EVENT_in = json.load(f)['Events'][0] # noqa: N806
event_list = EVENT_in['timeSeries']
pattern_list = EVENT_in['pattern']
- # TODO: use dictionary
+ # TODO: use dictionary # noqa: TD002
pattern_ts_link = [p['timeSeries'] for p in pattern_list]
- TS_list = []
+ TS_list = [] # noqa: N806
# define the time series
for evt_i, event in enumerate(event_list):
@@ -183,10 +183,10 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
# create the EDP specification
# load the EDP file
- with open(EDP_input_path, encoding='utf-8') as f:
- EDP_in = json.load(f)
+ with open(EDP_input_path, encoding='utf-8') as f: # noqa: PTH123
+ EDP_in = json.load(f) # noqa: N806
- EDP_list = EDP_in['EngineeringDemandParameters'][0]['responses']
+ EDP_list = EDP_in['EngineeringDemandParameters'][0]['responses'] # noqa: N806
edp_specs = {}
for response in EDP_list:
@@ -202,7 +202,7 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
edp_specs[response['type']].update(
{
- response['id']: dict(
+ response['id']: dict( # noqa: C404
[
(dof, list(np.atleast_1d(response['node'])))
for dof in response['dofs']
@@ -226,7 +226,7 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
if floor is not None:
edp_specs[response['type']].update(
{
- response['id']: dict(
+ response['id']: dict( # noqa: C404
[(dof, node_list) for dof in response['dofs']]
)
}
@@ -236,8 +236,8 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
# print(edp_name, edp_data)
# run the analysis
- # TODO: default analysis script
- EDP_res = run_analysis(
+ # TODO: default analysis script # noqa: TD002
+ EDP_res = run_analysis( # noqa: N806
GM_dt=EVENT_in['dT'],
GM_npts=EVENT_in['numSteps'],
TS_List=TS_list,
@@ -256,10 +256,10 @@ def run_openseesPy(EVENT_input_path, SAM_input_path, BIM_input_path, EDP_input_p
response['scalar_data'] = edp # [val for dof, val in edp.items()]
# print(response)
- with open(EDP_input_path, 'w', encoding='utf-8') as f:
+ with open(EDP_input_path, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(EDP_in, f, indent=2)
- log_msg('Simulation script finished.')
+ log_msg('Simulation script finished.') # noqa: F405
if __name__ == '__main__':
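C404, suppressed three times above (the node_map and edp_specs constructions), marks dict() built from an intermediate list comprehension; the idiomatic replacement is a dict comprehension. A sketch with a minimal placeholder for SAM_in:

    SAM_in = {'NodeMapping': [{'floor': '1', 'node': '101'}]}  # minimal placeholder

    # C404 pattern flagged above: dict() over an intermediate list
    node_map = dict(
        [(int(entry['floor']), int(entry['node'])) for entry in SAM_in['NodeMapping']]
    )

    # idiomatic replacement: a dict comprehension, no intermediate list
    node_map = {
        int(entry['floor']): int(entry['node']) for entry in SAM_in['NodeMapping']
    }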
diff --git a/modules/performSIMULATION/openSees_R/OpenSeesSimulation.py b/modules/performSIMULATION/openSees_R/OpenSeesSimulation.py
index 9005a7fd9..a3255f703 100644
--- a/modules/performSIMULATION/openSees_R/OpenSeesSimulation.py
+++ b/modules/performSIMULATION/openSees_R/OpenSeesSimulation.py
@@ -1,35 +1,35 @@
-import os
+import os # noqa: INP001, D100
import subprocess
import sys
-inputArgs = sys.argv
+inputArgs = sys.argv # noqa: N816
# set filenames
-bimName = sys.argv[2]
-samName = sys.argv[4]
-evtName = sys.argv[6]
-edpName = sys.argv[8]
-simName = sys.argv[10]
+bimName = sys.argv[2] # noqa: N816
+samName = sys.argv[4] # noqa: N816
+evtName = sys.argv[6] # noqa: N816
+edpName = sys.argv[8] # noqa: N816
+simName = sys.argv[10] # noqa: N816
-scriptDir = os.path.dirname(os.path.realpath(__file__))
+scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
# If requesting random variables run getUncertainty
# Otherwise, Run Opensees
if ('-getRV' in inputArgs) or ('--getRV' in inputArgs):
- getUncertaintyCommand = (
+ getUncertaintyCommand = ( # noqa: N816
f'"{scriptDir}/getUncertainty" {bimName} {samName} {evtName} {simName}'
)
subprocess.Popen(args=getUncertaintyCommand, shell=True).wait()
else:
# Run preprocessor
- preprocessorCommand = f'"{scriptDir}/mainPreprocessor" {bimName} {samName} {evtName} {edpName} example.tcl'
- subprocess.Popen(preprocessorCommand, shell=True).wait()
+ preprocessorCommand = f'"{scriptDir}/mainPreprocessor" {bimName} {samName} {evtName} {edpName} example.tcl' # noqa: N816
+ subprocess.Popen(preprocessorCommand, shell=True).wait() # noqa: S602
# Run OpenSees
- subprocess.Popen('OpenSees example.tcl', shell=True).wait()
+ subprocess.Popen('OpenSees example.tcl', shell=True).wait() # noqa: S602, S607
# Run postprocessor
- postprocessorCommand = (
+ postprocessorCommand = ( # noqa: N816
f'"{scriptDir}/mainPostprocessor" {bimName} {samName} {evtName} {edpName}'
)
- subprocess.Popen(postprocessorCommand, shell=True).wait()
+ subprocess.Popen(postprocessorCommand, shell=True).wait() # noqa: S602
diff --git a/modules/performSIMULATION/surrogateRegionalPy/SurrogateRegionalPy.py b/modules/performSIMULATION/surrogateRegionalPy/SurrogateRegionalPy.py
index 19f391652..fce486786 100644
--- a/modules/performSIMULATION/surrogateRegionalPy/SurrogateRegionalPy.py
+++ b/modules/performSIMULATION/surrogateRegionalPy/SurrogateRegionalPy.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -50,24 +50,24 @@
import sys
-def main(aimName, samName, evtName, edpName, simName, getRV):
+def main(aimName, samName, evtName, edpName, simName, getRV): # noqa: N803, D103
#
# Find the GI and SAM files
#
- with open(aimName, encoding='utf-8') as f:
- root_AIM = json.load(f)
- GI = root_AIM['GeneralInformation']
+ with open(aimName, encoding='utf-8') as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
+ GI = root_AIM['GeneralInformation'] # noqa: N806
- with open(samName, encoding='utf-8') as f:
- SAM = json.load(f)
+ with open(samName, encoding='utf-8') as f: # noqa: PTH123
+ SAM = json.load(f) # noqa: N806
#
# Get user-uploaded filter script
#
# sy - so far works only for single model
- filterFileName = root_AIM['Simulation']['filterFileName']
- filateFilePath = root_AIM['Simulation']['filterFilePath']
+ filterFileName = root_AIM['Simulation']['filterFileName'] # noqa: N806
+ filateFilePath = root_AIM['Simulation']['filterFilePath'] # noqa: N806
sys.path.insert(0, filateFilePath)
analysis_script = importlib.__import__(
filterFileName[:-3],
@@ -79,7 +79,7 @@ def main(aimName, samName, evtName, edpName, simName, getRV):
0,
)
model_distributor = analysis_script.model_distributor
- modelName = model_distributor(GI, SAM)
+ modelName = model_distributor(GI, SAM) # noqa: N806
if getRV:
runDefault(root_AIM, aimName, samName, evtName, edpName, simName, getRV)
@@ -97,11 +97,11 @@ def main(aimName, samName, evtName, edpName, simName, getRV):
runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName)
-def runDefault(root_AIM, aimName, samName, evtName, edpName, simName, getRV=False):
+def runDefault(root_AIM, aimName, samName, evtName, edpName, simName, getRV=False): # noqa: FBT002, N802, N803, D103
#
# Find app name
#
- mySimAppName = root_AIM['Simulation']['DefaultAnalysis']['Buildings'][
+ mySimAppName = root_AIM['Simulation']['DefaultAnalysis']['Buildings'][ # noqa: N806
'Application'
]
@@ -110,36 +110,36 @@ def runDefault(root_AIM, aimName, samName, evtName, edpName, simName, getRV=Fals
#
root_AIM['Simulation'] = root_AIM['Simulation']['DefaultAnalysis']['Buildings']
- currentDir = os.getcwd()
- newAimName = os.path.join(currentDir, os.path.basename(aimName))
+ currentDir = os.getcwd() # noqa: PTH109, N806
+ newAimName = os.path.join(currentDir, os.path.basename(aimName)) # noqa: PTH118, PTH119, N806
- with open(newAimName, 'w', encoding='utf-8') as f:
+ with open(newAimName, 'w', encoding='utf-8') as f: # noqa: PTH123
json_object = json.dumps(root_AIM)
f.write(json_object)
#
# overwrite with default AIM.json file
#
s = [
- os.path.dirname(__file__),
+ os.path.dirname(__file__), # noqa: PTH120
'..',
'..',
'Workflow',
'WorkflowApplications.json',
]
- workflowAppJsonPath = os.path.join(*s)
- with open(workflowAppJsonPath, encoding='utf-8') as f:
- workflowAppDict = json.load(f)
- appList = workflowAppDict['SimulationApplications']['Applications']
- myApp = next(item for item in appList if item['Name'] == mySimAppName)
+ workflowAppJsonPath = os.path.join(*s) # noqa: PTH118, N806
+ with open(workflowAppJsonPath, encoding='utf-8') as f: # noqa: PTH123
+ workflowAppDict = json.load(f) # noqa: N806
+ appList = workflowAppDict['SimulationApplications']['Applications'] # noqa: N806
+ myApp = next(item for item in appList if item['Name'] == mySimAppName) # noqa: N806
s = [
- os.path.dirname(__file__),
+ os.path.dirname(__file__), # noqa: PTH120
'..',
'..',
'..',
- os.path.dirname(myApp['ExecutablePath']),
+ os.path.dirname(myApp['ExecutablePath']), # noqa: PTH120
]
- mySimAppPath = os.path.join(*s)
- mySimAppName = os.path.basename(myApp['ExecutablePath'])
+ mySimAppPath = os.path.join(*s) # noqa: PTH118, N806
+ mySimAppName = os.path.basename(myApp['ExecutablePath']) # noqa: PTH119, N806
#
# run correct backend app
@@ -184,12 +184,12 @@ def runDefault(root_AIM, aimName, samName, evtName, edpName, simName, getRV=Fals
)
-def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName):
+def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName): # noqa: C901, N802, N803, D103
#
# Augment to params.in file
#
- GIkeys = [
+ GIkeys = [ # noqa: N806
'Latitude',
'Longitude',
'NumberOfStories',
@@ -199,7 +199,7 @@ def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName):
'PlanArea',
'ReplacementCost',
]
- SAMkeys_properties = [
+ SAMkeys_properties = [ # noqa: N806
'dampingRatio',
'K0',
'Sy',
@@ -212,59 +212,59 @@ def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName):
'eta_soft',
'a_k',
]
- SAMkeys_nodes = ['mass']
+ SAMkeys_nodes = ['mass'] # noqa: N806
- with open('params.in') as f:
- paramsStr = f.read()
- nAddParams = 0
+ with open('params.in') as f: # noqa: PTH123
+ paramsStr = f.read() # noqa: N806
+ nAddParams = 0 # noqa: N806
for key in GI:
if key in GIkeys:
val = GI[key]
if not isinstance(val, str):
- paramsStr += f'{key} {val}\n'
+ paramsStr += f'{key} {val}\n' # noqa: N806
else:
- paramsStr += f'{key} "{val}"\n'
- nAddParams += 1
+ paramsStr += f'{key} "{val}"\n' # noqa: N806
+ nAddParams += 1 # noqa: N806
# For damping
for key in SAM['Properties']:
if key in SAMkeys_properties:
val = SAM['Properties'][key]
if not isinstance(val, str):
- paramsStr += f'{key} {val}\n'
+ paramsStr += f'{key} {val}\n' # noqa: N806
else:
- paramsStr += f'{key} "{val}"\n'
- nAddParams += 1
+ paramsStr += f'{key} "{val}"\n' # noqa: N806
+ nAddParams += 1 # noqa: N806
# For material properties
- for SAM_elem in SAM['Properties']['uniaxialMaterials']:
+ for SAM_elem in SAM['Properties']['uniaxialMaterials']: # noqa: N806
for key in SAM_elem:
if key in SAMkeys_properties:
val = SAM_elem[key]
if not isinstance(val, str):
- paramsStr += '{}-{} {}\n'.format(key, SAM_elem['name'], val)
+ paramsStr += '{}-{} {}\n'.format(key, SAM_elem['name'], val) # noqa: N806
else:
- paramsStr += '{}-{} "{}"\n'.format(key, SAM_elem['name'], val)
- nAddParams += 1
+ paramsStr += '{}-{} "{}"\n'.format(key, SAM_elem['name'], val) # noqa: N806
+ nAddParams += 1 # noqa: N806
# For mass
- for SAM_node in SAM['Geometry']['nodes']:
+ for SAM_node in SAM['Geometry']['nodes']: # noqa: N806
for key in SAM_node:
if key in SAMkeys_nodes:
val = SAM_node[key]
if not isinstance(val, str):
- paramsStr += '{}-{} {}\n'.format(key, SAM_node['name'], val)
+ paramsStr += '{}-{} {}\n'.format(key, SAM_node['name'], val) # noqa: N806
else:
- paramsStr += '{}-{} "{}"\n'.format(key, SAM_node['name'], val)
- nAddParams += 1
+ paramsStr += '{}-{} "{}"\n'.format(key, SAM_node['name'], val) # noqa: N806
+ nAddParams += 1 # noqa: N806
- stringList = paramsStr.split('\n')
+ stringList = paramsStr.split('\n') # noqa: N806
stringList.remove(stringList[0]) # remove # params (will be added later)
- stringList = set(stringList) # remove duplicates
- stringList = [i for i in stringList if i] # remove empty
- stringList = [str(len(stringList))] + stringList
- with open('params.in', 'w') as f:
+ stringList = set(stringList) # remove duplicates # noqa: N806
+ stringList = [i for i in stringList if i] # remove empty # noqa: N806
+ stringList = [str(len(stringList))] + stringList # noqa: N806, RUF005
+ with open('params.in', 'w') as f: # noqa: PTH123
f.write('\n'.join(stringList))
f.close()
@@ -273,41 +273,41 @@ def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName):
# get sur model info
#
- surFileName = None
+ surFileName = None # noqa: N806
for model in root_AIM['Simulation']['Models']:
if model['modelName'] == modelName:
- surFileName = model['fileName']
+ surFileName = model['fileName'] # noqa: N806
if surFileName is None:
- print(f'surrogate model {modelName} is not found')
- exit(-1)
+ print(f'surrogate model {modelName} is not found') # noqa: T201
+ exit(-1) # noqa: PLR1722
#
# find surrogate model prediction app
#
s = [
- os.path.dirname(__file__),
+ os.path.dirname(__file__), # noqa: PTH120
'..',
'..',
'Workflow',
'WorkflowApplications.json',
]
- workflowAppJsonPath = os.path.join(*s)
- with open(workflowAppJsonPath, encoding='utf-8') as f:
- workflowAppDict = json.load(f)
- appList = workflowAppDict['SimulationApplications']['Applications']
- simAppName = 'SurrogateSimulation'
- myApp = next(item for item in appList if item['Name'] == simAppName)
+ workflowAppJsonPath = os.path.join(*s) # noqa: PTH118, N806
+ with open(workflowAppJsonPath, encoding='utf-8') as f: # noqa: PTH123
+ workflowAppDict = json.load(f) # noqa: N806
+ appList = workflowAppDict['SimulationApplications']['Applications'] # noqa: N806
+ simAppName = 'SurrogateSimulation' # noqa: N806
+ myApp = next(item for item in appList if item['Name'] == simAppName) # noqa: N806
s = [
- os.path.dirname(__file__),
+ os.path.dirname(__file__), # noqa: PTH120
'..',
'..',
'..',
- os.path.dirname(myApp['ExecutablePath']),
+ os.path.dirname(myApp['ExecutablePath']), # noqa: PTH120
]
- mySurrogatePath = os.path.join(*s)
- mySurrogateName = os.path.basename(myApp['ExecutablePath'])
+ mySurrogatePath = os.path.join(*s) # noqa: PTH118, N806
+ mySurrogateName = os.path.basename(myApp['ExecutablePath']) # noqa: PTH119, N806
#
# import surrogate functions
@@ -319,9 +319,9 @@ def runSurrogate(modelName, GI, SAM, root_AIM, aimName, edpName):
r'..\\..\\..\\..\\input_data\\' + surFileName
)
- currentDir = os.getcwd()
- newAimName = os.path.join(currentDir, os.path.basename(aimName))
- with open(newAimName, 'w', encoding='utf-8') as f:
+ currentDir = os.getcwd() # noqa: PTH109, N806
+ newAimName = os.path.join(currentDir, os.path.basename(aimName)) # noqa: PTH118, PTH119, N806
+ with open(newAimName, 'w', encoding='utf-8') as f: # noqa: PTH123
json_object = json.dumps(root_AIM)
f.write(json_object)
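The repeated s = [...] plus os.path.join(*s) blocks above (PTH118, PTH120) collapse to a single pathlib expression. A sketch, assuming the same directory layout relative to this module:

    import json
    from pathlib import Path

    # PTH118/PTH120: s = [...] plus os.path.join(*s) becomes one expression
    workflow_app_json = (
        Path(__file__).resolve().parents[2] / 'Workflow' / 'WorkflowApplications.json'
    )

    with workflow_app_json.open(encoding='utf-8') as f:
        workflow_app_dict = json.load(f)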
diff --git a/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py b/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
index 269f19b48..190072e95 100644
--- a/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
+++ b/modules/performSIMULATION/surrogateSimulation/SurrogateSimulation.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -47,7 +47,7 @@
# from simcenter_common import *
-convert_EDP = {
+convert_EDP = { # noqa: N816
'max_abs_acceleration': 'PFA',
'max_rel_disp': 'PFD',
'max_drift': 'PID',
@@ -57,18 +57,18 @@
}
-def run_surrogateGP(AIM_input_path, EDP_input_path):
+def run_surrogateGP(AIM_input_path, EDP_input_path): # noqa: ARG001, N802, N803, D103
# these imports are here to save time when the app is called without
# the -getRV flag
# import openseespy.opensees as ops
- with open(AIM_input_path, encoding='utf-8') as f:
- root_AIM = json.load(f)
+ with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
# root_GI = root_AIM['GeneralInformation']
- root_SAM = root_AIM['Applications']['Modeling']
+ root_SAM = root_AIM['Applications']['Modeling'] # noqa: N806
- surrogate_path = os.path.join(
+ surrogate_path = os.path.join( # noqa: PTH118, F841
root_SAM['ApplicationData']['MS_Path'],
root_SAM['ApplicationData']['mainScript'],
)
@@ -79,27 +79,27 @@ def run_surrogateGP(AIM_input_path, EDP_input_path):
#
# Let's call GPdriver creator?
#
- pythonEXE = sys.executable
+ pythonEXE = sys.executable # noqa: N806
- surrogatePredictionPath = os.path.join(
- os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
+ surrogatePredictionPath = os.path.join( # noqa: PTH118, N806
+ os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), # noqa: PTH100, PTH120
'performFEM',
'surrogateGP',
'gpPredict.py',
)
- curpath = os.getcwd()
- params_name = os.path.join(curpath, 'params.in')
- surrogate_name = os.path.join(
+ curpath = os.getcwd() # noqa: PTH109
+ params_name = os.path.join(curpath, 'params.in') # noqa: PTH118
+ surrogate_name = os.path.join( # noqa: PTH118
curpath, root_SAM['ApplicationData']['postprocessScript']
) # pickl
- surrogate_meta_name = os.path.join(
+ surrogate_meta_name = os.path.join( # noqa: PTH118
curpath, root_SAM['ApplicationData']['mainScript']
) # json
# compute IMs
# print(f"{pythonEXE} {surrogatePredictionPath} {params_name} {surrogate_meta_name} {surrogate_name}")
- os.system(
+ os.system( # noqa: S605
f'{pythonEXE} {surrogatePredictionPath} {params_name} {surrogate_meta_name} {surrogate_name}'
)
@@ -114,26 +114,26 @@ def run_surrogateGP(AIM_input_path, EDP_input_path):
root_AIM['Applications']['Simulation']['Application']
!= 'SurrogateRegionalPy'
):
- with open('../workflow.err', 'w') as f:
+ with open('../workflow.err', 'w') as f: # noqa: PTH123
f.write(
'Do not select [None] in the FEM tab. [None] is used only when using pre-trained surrogate, i.e. when [Surrogate] is selected in the SIM Tab.'
)
- exit(-1)
+ exit(-1) # noqa: PLR1722
-def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None):
- with open(AIM_input_path, encoding='utf-8') as f:
- root_AIM = json.load(f)
+def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None): # noqa: C901, N802, N803, D103
+ with open(AIM_input_path, encoding='utf-8') as f: # noqa: PTH123
+ root_AIM = json.load(f) # noqa: N806
- if newEDP_input_path == None:
- newEDP_input_path = EDP_input_path
+ if newEDP_input_path == None: # noqa: E711
+ newEDP_input_path = EDP_input_path # noqa: N806
- root_SAM = root_AIM['Applications']['Modeling']
- curpath = os.getcwd()
+ root_SAM = root_AIM['Applications']['Modeling'] # noqa: N806
+ curpath = os.getcwd() # noqa: PTH109
# surrogate_path = os.path.join(root_SAM['ApplicationData']['MS_Path'],root_SAM['ApplicationData']['mainScript'])
- surrogate_path = os.path.join(curpath, root_SAM['ApplicationData']['mainScript'])
+ surrogate_path = os.path.join(curpath, root_SAM['ApplicationData']['mainScript']) # noqa: PTH118
- with open(surrogate_path, encoding='utf-8') as f:
+ with open(surrogate_path, encoding='utf-8') as f: # noqa: PTH123
surrogate_model = json.load(f)
#
@@ -142,14 +142,14 @@ def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None):
edp_names = surrogate_model['ylabels']
- if not os.path.isfile('results.out'):
+ if not os.path.isfile('results.out'): # noqa: PTH113
# not found
- print('Skiping surrogateEDP - results.out does not exist in ' + os.getcwd())
- exit(-1)
- elif os.stat('results.out').st_size == 0:
+ print('Skipping surrogateEDP - results.out does not exist in ' + os.getcwd()) # noqa: T201, PTH109
+ exit(-1) # noqa: PLR1722
+ elif os.stat('results.out').st_size == 0: # noqa: PTH116
# found but empty
- print('Skiping surrogateEDP - results.out is empty in ' + os.getcwd())
- exit(-1)
+ print('Skipping surrogateEDP - results.out is empty in ' + os.getcwd()) # noqa: T201, PTH109
+ exit(-1) # noqa: PLR1722
edp_vals = np.loadtxt('results.out').tolist()
@@ -157,48 +157,48 @@ def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None):
# Read EDP file, mapping between EDPnames and EDP.json and write scalar_data
#
- with open(EDP_input_path, encoding='utf-8') as f:
- rootEDP = json.load(f)
+ with open(EDP_input_path, encoding='utf-8') as f: # noqa: PTH123
+ rootEDP = json.load(f) # noqa: N806
- numEvents = len(rootEDP['EngineeringDemandParameters'])
- numResponses = rootEDP['total_number_edp']
+ numEvents = len(rootEDP['EngineeringDemandParameters']) # noqa: N806, F841
+ numResponses = rootEDP['total_number_edp'] # noqa: N806, F841
i = 0 # current event id
event = rootEDP['EngineeringDemandParameters'][i]
- eventEDPs = event['responses']
+ eventEDPs = event['responses'] # noqa: N806
for j in range(len(eventEDPs)):
- eventEDP = eventEDPs[j]
- eventType = eventEDP['type']
+ eventEDP = eventEDPs[j] # noqa: N806
+ eventType = eventEDP['type'] # noqa: N806
known = False
if eventType == 'max_abs_acceleration':
- edpAcronym = 'PFA'
+ edpAcronym = 'PFA' # noqa: N806
floor = eventEDP['floor']
known = True
elif eventType == 'max_drift':
- edpAcronym = 'PID'
+ edpAcronym = 'PID' # noqa: N806
floor = eventEDP['floor2']
known = True
elif eventType == 'max_roof_drift':
- edpAcronym = 'PRD'
+ edpAcronym = 'PRD' # noqa: N806
floor = '1'
known = True
elif eventType == 'residual_disp':
- edpAcronym = 'RD'
+ edpAcronym = 'RD' # noqa: N806
floor = eventEDP['floor']
known = True
elif eventType == 'max_pressure':
- edpAcronym = 'PSP'
+ edpAcronym = 'PSP' # noqa: N806
floor = eventEDP['floor2']
known = True
elif eventType == 'max_rel_disp':
- edpAcronym = 'PFD'
+ edpAcronym = 'PFD' # noqa: N806
floor = eventEDP['floor']
known = True
elif eventType == 'peak_wind_gust_speed':
- edpAcronym = 'PWS'
+ edpAcronym = 'PWS' # noqa: N806
floor = eventEDP['floor']
known = True
else:
- edpList = [eventType]
+ edpList = [eventType] # noqa: N806
if known:
dofs = eventEDP['dofs']
@@ -207,7 +207,7 @@ def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None):
my_edp_name = '1-' + edpAcronym + '-' + floor + '-' + str(dof)
idscalar = edp_names.index(my_edp_name)
scalar_data += [edp_vals[idscalar]]
- edpList = [my_edp_name]
+ edpList = [my_edp_name] # noqa: N806, F841
eventEDPs[j]['scalar_data'] = scalar_data
@@ -216,7 +216,7 @@ def write_EDP(AIM_input_path, EDP_input_path, newEDP_input_path=None):
) # Remove EQ name if exists because it is confusing
rootEDP['EngineeringDemandParameters'][0]['responses'] = eventEDPs
- with open(newEDP_input_path, 'w', encoding='utf-8') as f:
+ with open(newEDP_input_path, 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(rootEDP, f, indent=2)
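Two codes suppressed in this file have one-line fixes: E711 (comparing to None by equality instead of identity) and PLR1722 (the site-provided exit() builtin instead of sys.exit()). A sketch, with the results.out existence check also shown in its PTH113-style pathlib form:

    import sys
    from pathlib import Path

    def resolve_edp_path(edp_input_path, new_edp_input_path=None):
        # E711: compare to the None singleton by identity
        if new_edp_input_path is None:
            new_edp_input_path = edp_input_path
        return new_edp_input_path

    # PLR1722: sys.exit() raises SystemExit and does not depend on the site module
    if not Path('results.out').is_file():
        print('Skipping surrogateEDP - results.out does not exist in ' + str(Path.cwd()))
        sys.exit(-1)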
diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py
index 304492e05..76817b505 100644
--- a/modules/performUQ/SimCenterUQ/PLoM/PLoM.py
+++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM.py
@@ -1,36 +1,36 @@
-# JGA
+# JGA # noqa: N999, D100
import importlib
import os
import sys
# import matplotlib.pyplot as plt
# export DISPLAY=localhost:0.0
-from ctypes import *
+from ctypes import * # noqa: F403
from pathlib import Path
import numpy as np
import pandas as pd
import PLoM_library as plom
-from general import *
+from general import * # noqa: F403
-class PLoM:
+class PLoM: # noqa: D101
def __init__(
self,
model_name='plom',
data='',
separator=',',
- col_header=False,
+ col_header=False, # noqa: FBT002
constraints=None,
- run_tag=False,
- plot_tag=False,
+ run_tag=False, # noqa: FBT002
+ plot_tag=False, # noqa: FBT002
num_rlz=5,
tol_pca=1e-6,
epsilon_kde=25,
- tol_PCA2=1e-5,
+ tol_PCA2=1e-5, # noqa: N803
tol=1e-6,
max_iter=50,
- runDiffMaps=True,
+ runDiffMaps=True, # noqa: FBT002, N803
db_path=None,
):
# basic setups
@@ -96,30 +96,30 @@ def __init__(
def _basic_config(self, model_name=None, db_path=None):
"""Basic setups
- model_name: job name (used for database name)
- """
+ """ # noqa: D205, D400, D401
if not db_path:
- self.dir_log = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ self.dir_log = os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'RunDir',
)
- self.dir_run = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ self.dir_run = os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'RunDir',
model_name,
)
else:
self.dir_log = db_path
- self.dir_run = os.path.join(db_path, model_name)
+ self.dir_run = os.path.join(db_path, model_name) # noqa: PTH118
# initialize logfile
try:
- os.makedirs(self.dir_run, exist_ok=True)
- self.logfile = Logfile(logfile_dir=self.dir_log)
+ os.makedirs(self.dir_run, exist_ok=True) # noqa: PTH103
+ self.logfile = Logfile(logfile_dir=self.dir_log) # noqa: F405
self.logfile.write_msg(
msg=f'PLoM: Running directory {self.dir_run} initialized.',
msg_type='RUNNING',
msg_level=0,
)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM: Running directory {self.dir_run} cannot be initialized.',
msg_type='ERROR',
@@ -127,10 +127,10 @@ def _basic_config(self, model_name=None, db_path=None):
)
# initialize database server
self.dbserver = None
- self.dbserver = DBServer(db_dir=self.dir_run, db_name=model_name + '.h5')
+ self.dbserver = DBServer(db_dir=self.dir_run, db_name=model_name + '.h5') # noqa: F405
try:
- self.dbserver = DBServer(db_dir=self.dir_run, db_name=model_name + '.h5')
- except:
+ self.dbserver = DBServer(db_dir=self.dir_run, db_name=model_name + '.h5') # noqa: F405
+ except: # noqa: E722
self.logfile.write_msg(
msg='PLoM: database server initialization failed.',
msg_type='ERROR',
@@ -143,22 +143,22 @@ def _basic_config(self, model_name=None, db_path=None):
msg_level=0,
)
# initialize visualization output path
- self.vl_path = os.path.join(self.dir_run, 'FigOut')
+ self.vl_path = os.path.join(self.dir_run, 'FigOut') # noqa: PTH118
try:
- os.makedirs(self.vl_path, exist_ok=True)
+ os.makedirs(self.vl_path, exist_ok=True) # noqa: PTH103
self.logfile.write_msg(
msg=f'PLoM: visualization folder {self.vl_path} initialized.',
msg_type='RUNNING',
msg_level=0,
)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM: visualization folder {self.vl_path} not initialized.',
msg_type='WARNING',
msg_level=0,
)
- def add_constraints(self, constraints_file=None):
+ def add_constraints(self, constraints_file=None): # noqa: D102
if not constraints_file:
self.g_c = None
self.D_x_g_c = None
@@ -181,7 +181,7 @@ def add_constraints(self, constraints_file=None):
new_constraints = importlib.__import__(
path_constraints.name[:-3], globals(), locals(), [], 0
)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM.add_constraints: could not add constraints {constraints_file}',
msg_type='ERROR',
@@ -213,7 +213,7 @@ def add_constraints(self, constraints_file=None):
self.dbserver.add_item(
item=[constraints_file], data_type='ConstraintsFile'
)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM.add_constraints: at least one attribute (i.e., g_c, D_x_gc, beta_c, or beta_c_aux) missing in {constraints_file}',
msg_type='ERROR',
@@ -225,7 +225,7 @@ def add_constraints(self, constraints_file=None):
def switch_constraints(self, constraint_tag=1):
"""Selecting different constraints
- constraint_tag: the tag of selected constraint
- """
+ """ # noqa: D205, D400, D401
if constraint_tag > self.num_constraints:
self.logfile.write_msg(
msg=f'PLoM.switch_constraints: sorry the maximum constraint tag is {self.num_constraints}',
@@ -253,7 +253,7 @@ def switch_constraints(self, constraint_tag=1):
],
data_type='ConstraintsFile',
)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg='PLoM.get_constraints: cannot get constraints',
msg_type='ERROR',
@@ -261,22 +261,22 @@ def switch_constraints(self, constraint_tag=1):
)
def delete_constraints(self):
- """Removing all current constraints"""
+ """Removing all current constraints""" # noqa: D400, D401
self.g_c = None
self.D_x_g_c = None
self.beta_c = []
self.dbserver.add_item(item=[''], data_type='ConstraintsFile')
- def load_data(self, filename, separator=',', col_header=False):
+ def load_data(self, filename, separator=',', col_header=False): # noqa: FBT002, C901, D102
# initialize the matrix and data size
- X = []
- N = 0
+ X = [] # noqa: N806
+ N = 0 # noqa: N806
n = 0
# check if the file exist
import os
- if not os.path.exists(filename):
+ if not os.path.exists(filename): # noqa: PTH110
self.logfile.write_msg(
msg=f'load_data: the input file {filename} is not found',
msg_type='ERROR',
@@ -285,7 +285,7 @@ def load_data(self, filename, separator=',', col_header=False):
return X, N, n
# read data
- if os.path.splitext(filename)[-1] in ['.csv', '.dat', '.txt']:
+ if os.path.splitext(filename)[-1] in ['.csv', '.dat', '.txt']: # noqa: PTH122
# txt data
col = None
if col_header:
@@ -295,11 +295,11 @@ def load_data(self, filename, separator=',', col_header=False):
for cur_col in self.X0.columns:
if all(np.isnan(self.X0.loc[:, cur_col])):
self.X0.drop(columns=cur_col)
- X = self.X0.to_numpy()
+ X = self.X0.to_numpy() # noqa: N806
- elif os.path.splitext(filename)[-1] in ['.mat', '.json']:
+ elif os.path.splitext(filename)[-1] in ['.mat', '.json']: # noqa: PTH122
# json or mat
- if os.path.splitext(filename)[-1] == '.mat':
+ if os.path.splitext(filename)[-1] == '.mat': # noqa: PTH122
import scipy.io as scio
matdata = scio.loadmat(filename)
@@ -308,7 +308,7 @@ def load_data(self, filename, separator=',', col_header=False):
]
if len(var_names) == 1:
# single matrix
- X = matdata[var_names[0]]
+ X = matdata[var_names[0]] # noqa: N806
self.X0 = pd.DataFrame(
X, columns=['Var' + str(x) for x in X.shape[1]]
)
@@ -317,25 +317,25 @@ def load_data(self, filename, separator=',', col_header=False):
# multiple columns
for cur_var in var_names:
X.append(matdata[cur_var].tolist())
- X = np.array(X).T
- X = X[0, :, :]
+ X = np.array(X).T # noqa: N806
+ X = X[0, :, :] # noqa: N806
self.X0 = pd.DataFrame(X, columns=var_names)
else:
import json
- with open(filename, encoding='utf-8') as f:
+ with open(filename, encoding='utf-8') as f: # noqa: PTH123
jsondata = json.load(f)
var_names = list(jsondata.keys())
# multiple columns
for cur_var in var_names:
X.append(jsondata[cur_var])
- X = np.array(X).T
+ X = np.array(X).T # noqa: N806
self.X0 = pd.DataFrame(X, columns=var_names)
- elif os.path.splitext(filename)[-1] == '.h5':
+ elif os.path.splitext(filename)[-1] == '.h5': # noqa: PTH122
# this h5 can be either formatted by PLoM or not
# a separate method to deal with this file
- X = self.load_h5(filename)
+ X = self.load_h5(filename) # noqa: N806
else:
self.logfile.write_msg(
@@ -350,7 +350,7 @@ def load_data(self, filename, separator=',', col_header=False):
)
# Update data sizes
- N, n = X.shape
+ N, n = X.shape # noqa: N806
self.logfile.write_msg(
msg=f'PLoM.load_data: loaded data size = ({N}, {n}).',
msg_type='RUNNING',
@@ -362,26 +362,26 @@ def load_data(self, filename, separator=',', col_header=False):
# def check_var_name():
- def get_data(self):
+ def get_data(self): # noqa: D102
# return data and data sizes
return self.X, self.N, self.n
def _load_h5_plom(self, filename):
- """Loading PLoM-formatted h5 database"""
+ """Loading PLoM-formatted h5 database""" # noqa: D400, D401
try:
store = pd.HDFStore(filename, 'r')
- for cur_var in store.keys():
- if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]:
+ for cur_var in store.keys(): # noqa: SIM118
+ if cur_var in self.dbserver.get_item_adds() and ATTR_MAP[cur_var]: # noqa: F405
# read in
cur_data = store[cur_var]
cur_dshape = tuple(
- [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()]
+ [x[0] for x in store['/DS_' + cur_var[1:]].values.tolist()] # noqa: PD011
)
if cur_dshape == (1,):
- item_value = np.array(sum(cur_data.values.tolist(), []))
- col_headers = list(cur_data.columns)[0]
+ item_value = np.array(sum(cur_data.values.tolist(), [])) # noqa: PD011, RUF017
+ col_headers = list(cur_data.columns)[0] # noqa: RUF015
else:
- item_value = cur_data.values
+ item_value = cur_data.values # noqa: PD011
col_headers = list(cur_data.columns)
self.dbserver.add_item(
item_name=cur_var.replace('/', ''),
@@ -393,20 +393,20 @@ def _load_h5_plom(self, filename):
if cur_var == '/constraints_file':
cur_data = store[cur_var]
self.dbserver.add_item(
- item=cur_data.values.tolist()[0],
+ item=cur_data.values.tolist()[0], # noqa: PD011
data_type='ConstraintsFile',
)
store.close()
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM._load_h5_plom: data in {filename} not compatible.',
msg_type='ERROR',
msg_level=0,
)
- def _load_h5_data_X(self, filename):
- """Loading a h5 data which is expected to contain X data"""
+ def _load_h5_data_X(self, filename): # noqa: N802
+ """Loading a h5 data which is expected to contain X data""" # noqa: D400, D401
try:
store = pd.HDFStore(filename, 'r')
# Note a table is expected for the variable
@@ -417,11 +417,11 @@ def _load_h5_data_X(self, filename):
)
return self.X0.to_numpy()
- except:
+ except: # noqa: E722
return None
def _sync_data(self):
- """Sync database data to current attributes"""
+ """Sync database data to current attributes""" # noqa: D400
avail_name_list = self.dbserver.get_name_list()
if not avail_name_list:
# empty database
@@ -435,13 +435,13 @@ def _sync_data(self):
if cur_item.startswith('/DS_'):
# skipping the data-shape attributes
continue
- if type(ATTR_MAP[cur_item]) is str:
+ if type(ATTR_MAP[cur_item]) is str: # noqa: F405
self.__setattr__(
- ATTR_MAP[cur_item],
+ ATTR_MAP[cur_item], # noqa: F405
self.dbserver.get_item(cur_item[1:]),
)
self.logfile.write_msg(
- msg=f'PLoM._sync_data: self.{ATTR_MAP[cur_item]} synced.',
+ msg=f'PLoM._sync_data: self.{ATTR_MAP[cur_item]} synced.', # noqa: F405
msg_type='RUNNING',
msg_level=0,
)
@@ -454,7 +454,7 @@ def _sync_data(self):
)
def _sync_constraints(self):
- """Sync constraints from dbserver to the attributes"""
+ """Sync constraints from dbserver to the attributes""" # noqa: D400
avail_name_list = self.dbserver.get_name_list()
if '/constraints_file' not in avail_name_list:
# empty constraints
@@ -470,7 +470,7 @@ def _sync_constraints(self):
self.add_constraints(constraints_file=cfile)
def load_h5(self, filename):
- """Loading h5 database"""
+ """Loading h5 database""" # noqa: D400, D401
try:
self._load_h5_plom(filename)
self.logfile.write_msg(
@@ -492,15 +492,15 @@ def load_h5(self, filename):
if '/X0' in self.dbserver.get_name_list():
self.X0 = self.dbserver.get_item('X0', table_like=True)
return self.X0.to_numpy()
- else:
+ else: # noqa: RET505
self.logfile.write_msg(
msg='PLoM.load_h5: the original X0 data not found in the loaded data.',
msg_type='ERROR',
msg_level=0,
)
return None
- except:
- X = self._load_h5_data_X(filename)
+ except: # noqa: E722
+ X = self._load_h5_data_X(filename) # noqa: N806
if X is None:
self.logfile.write_msg(
msg=f'PLoM.load_h5: cannot load {filename}.',
@@ -508,12 +508,12 @@ def load_h5(self, filename):
msg_level=0,
)
return None
- else:
+ else: # noqa: RET505
return X
- def add_data(self, filename, separator=',', col_header=False):
+ def add_data(self, filename, separator=',', col_header=False): # noqa: FBT002, D102
# load new data
- new_X, new_N, new_n = self.load_data(filename, separator, col_header)
+ new_X, new_N, new_n = self.load_data(filename, separator, col_header) # noqa: N806
# check data sizes
if new_n != self.n:
self.logfile.write_msg(
@@ -533,17 +533,17 @@ def add_data(self, filename, separator=',', col_header=False):
msg_level=0,
)
- def initialize_data(
+ def initialize_data( # noqa: D102
self,
filename,
separator=',',
- col_header=False,
- constraints='',
+ col_header=False, # noqa: FBT002
+ constraints='', # noqa: ARG002
):
# initialize the data and data sizes
try:
self.X, self.N, self.n = self.load_data(filename, separator, col_header)
- except:
+ except: # noqa: E722
self.logfile.write_msg(
msg=f'PLoM.initialize_data: cannot initialize data with {filename}',
msg_type='ERROR',
@@ -574,18 +574,18 @@ def initialize_data(
return 0
def _init_indv_tasks(self):
- """Initializing tasks"""
- for cur_task in FULL_TASK_LIST:
- self.__setattr__('task_' + cur_task, Task(task_name=cur_task))
+ """Initializing tasks""" # noqa: D400, D401
+ for cur_task in FULL_TASK_LIST: # noqa: F405
+ self.__setattr__('task_' + cur_task, Task(task_name=cur_task)) # noqa: F405
- def ConfigTasks(self, task_list=FULL_TASK_LIST):
+ def ConfigTasks(self, task_list=FULL_TASK_LIST): # noqa: C901, N802, F405
"""Creating a task list object
- task_list: a string list of tasks to run
- """
+ """ # noqa: D205, D400, D401
config_flag = True
self.cur_task_list = task_list
# check task orders
- if not all([x in FULL_TASK_LIST for x in self.cur_task_list]):
+ if not all([x in FULL_TASK_LIST for x in self.cur_task_list]): # noqa: C419, F405
self.logfile.write_msg(
msg='PLoM.config_tasks: task name not recognized.',
msg_type='ERROR',
@@ -593,13 +593,13 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
)
self.logfile.write_msg(
msg='PLoM.config_tasks: acceptable task names: {}.'.format(
- ','.join(FULL_TASK_LIST)
+ ','.join(FULL_TASK_LIST) # noqa: F405
),
msg_type='WARNING',
msg_level=0,
)
return False
- map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list]
+ map_order = [FULL_TASK_LIST.index(x) for x in self.cur_task_list] # noqa: F405
if map_order != sorted(map_order):
self.logfile.write_msg(
msg='PLoM.config_tasks: task order error.',
@@ -608,7 +608,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
)
self.logfile.write_msg(
msg='PLoM.config_tasks: please follow this order: {}.'.format(
- '->'.join(FULL_TASK_LIST)
+ '->'.join(FULL_TASK_LIST) # noqa: F405
),
msg_type='WARNING',
msg_level=0,
@@ -616,7 +616,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
return False
if (max(map_order) - min(map_order) + 1) != len(map_order):
# intermediate tasks missing -> the jobs form a chain, so the default is to fill in any missing tasks in the middle automatically
- self.cur_task_list = FULL_TASK_LIST[min(map_order) : max(map_order) + 1]
+ self.cur_task_list = FULL_TASK_LIST[min(map_order) : max(map_order) + 1] # noqa: F405
self.logfile.write_msg(
msg='PLoM.config_tasks: intermediate task(s) missing and being filled in automatically.',
msg_type='WARNING',
@@ -630,14 +630,14 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
msg_level=0,
)
# initializing the task list
- self.task_list = TaskList()
+ self.task_list = TaskList() # noqa: F405
# initializing individual tasks and refreshing status
self._init_indv_tasks()
- for cur_task in FULL_TASK_LIST:
- self.__getattribute__('task_' + cur_task).full_var_list = TASK_ITEM_MAP[
+ for cur_task in FULL_TASK_LIST: # noqa: F405
+ self.__getattribute__('task_' + cur_task).full_var_list = TASK_ITEM_MAP[ # noqa: F405
cur_task
]
- for cur_item in TASK_ITEM_MAP[cur_task]:
+ for cur_item in TASK_ITEM_MAP[cur_task]: # noqa: F405
if '/' + cur_item in self.dbserver.get_name_list():
self.__getattribute__('task_' + cur_task).avail_var_list.append(
cur_item
@@ -652,7 +652,7 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
self.task_list.refresh_status()
# need to check the task chain if all dependent tasks completed to go
# otherwise, the current run could not be completed
- pre_task_list = FULL_TASK_LIST[: FULL_TASK_LIST.index(self.cur_task_list[0])]
+ pre_task_list = FULL_TASK_LIST[: FULL_TASK_LIST.index(self.cur_task_list[0])] # noqa: F405
if len(pre_task_list):
for cur_task in pre_task_list:
if not self.__getattribute__('task_' + cur_task).refresh_status():
@@ -663,8 +663,8 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
msg_level=0,
)
- if config_flag:
- self.logfile.write_msg(
+ if config_flag: # noqa: RET503
+ self.logfile.write_msg( # noqa: RET503
msg='PLoM.config_tasks: the following tasks are configured to run: {}.'.format(
'->'.join(self.cur_task_list)
),
@@ -672,18 +672,18 @@ def ConfigTasks(self, task_list=FULL_TASK_LIST):
msg_level=0,
)
- def RunAlgorithm(
+ def RunAlgorithm( # noqa: C901, N802
self,
n_mc=5,
epsilon_pca=1e-6,
epsilon_kde=25,
- tol_PCA2=1e-5,
+ tol_PCA2=1e-5, # noqa: N803
tol=1e-6,
max_iter=50,
- plot_tag=False,
- runDiffMaps=None,
+ plot_tag=False, # noqa: FBT002
+ runDiffMaps=None, # noqa: N803
seed_num=None,
- tolKDE=0.1,
+ tolKDE=0.1, # noqa: N803
):
"""Running the PLoM algorithm to train the model and generate new realizations
- n_mc: realization/sample size ratio
@@ -691,9 +691,9 @@ def RunAlgorithm(
- epsilon_kde: smoothing parameter in the kernel density estimation
- tol: tolerance in the PLoM iterations
- max_iter: maximum number of iterations of the PLoM algorithm
- """
- if runDiffMaps == None:
- runDiffMaps = self.runDiffMaps
+ """ # noqa: D205, D400, D401
+ if runDiffMaps == None: # noqa: E711
+ runDiffMaps = self.runDiffMaps # noqa: N806
else:
self.runDiffMaps = runDiffMaps
@@ -923,7 +923,7 @@ def RunAlgorithm(
)
break
# refresh status
- for cur_item in TASK_ITEM_MAP[cur_task.task_name]:
+ for cur_item in TASK_ITEM_MAP[cur_task.task_name]: # noqa: F405
if '/' + cur_item in self.dbserver.get_name_list():
self.__getattribute__(
'task_' + cur_task.task_name
@@ -953,19 +953,19 @@ def RunAlgorithm(
msg_level=0,
)
- def DataNormalization(self, X):
+ def DataNormalization(self, X): # noqa: N802, N803
"""Normalizing the X
- X: the data matrix to be normalized
- """
+ """ # noqa: D205, D400, D401
# scaling
- X_scaled, alpha, x_min = plom.scaling(X)
+ X_scaled, alpha, x_min = plom.scaling(X) # noqa: N806
x_mean = plom.mean(X_scaled)
return X_scaled, alpha, x_min, x_mean
- def RunPCA(self, X_origin, epsilon_pca):
+ def RunPCA(self, X_origin, epsilon_pca): # noqa: N802, N803, D102
# ...PCA...
- (H, mu, phi, errors) = plom.PCA(X_origin, epsilon_pca)
+ (H, mu, phi, errors) = plom.PCA(X_origin, epsilon_pca) # noqa: N806
nu = len(H)
self.logfile.write_msg(
msg=f'PLoM.RunPCA: considered number of PCA components = {nu}',
@@ -987,17 +987,17 @@ def RunPCA(self, X_origin, epsilon_pca):
"""
return H, mu, phi, nu, errors
- def RunKDE(self, X, epsilon_kde):
+ def RunKDE(self, X, epsilon_kde): # noqa: N802, N803
"""Running Kernel Density Estimation
- X: the data matrix to be reduced
- epsilon_kde: smoothing parameter in the kernel density estimation
- """
+ """ # noqa: D205, D400, D401
(s_v, c_v, hat_s_v) = plom.parameters_kde(X)
- K, b = plom.K(X, epsilon_kde)
+ K, b = plom.K(X, epsilon_kde) # noqa: N806
return s_v, c_v, hat_s_v, K, b
- def DiffMaps(self, H, K, b, tol=0.1):
+ def DiffMaps(self, H, K, b, tol=0.1): # noqa: N802, N803, D102
# ..diff maps basis...
# self.Z = PCA(self.H)
try:
@@ -1005,7 +1005,7 @@ def DiffMaps(self, H, K, b, tol=0.1):
g = g.real
m = plom.m(eigenvalues, tol=tol)
a = g[:, 0:m].dot(np.linalg.inv(np.transpose(g[:, 0:m]).dot(g[:, 0:m])))
- Z = H.dot(a)
+ Z = H.dot(a) # noqa: N806
"""
if self.plot_tag:
fig, ax = plt.subplots(figsize=(6,4))
@@ -1016,11 +1016,11 @@ def DiffMaps(self, H, K, b, tol=0.1):
plt.savefig(os.path.join(self.vl_path,'KDE_EigenValue.png'),dpi=480)
self.logfile.write_msg(msg='PLoM: {} saved in {}.'.format('KDE_EigenValue.png',self.vl_path),msg_type='RUNNING',msg_level=0)
"""
- except:
+ except: # noqa: E722
g = None
m = 0
a = None
- Z = None
+ Z = None # noqa: N806
eigenvalues = []
self.logfile.write_msg(
msg='PLoM.DiffMaps: diffusion maps failed.',
@@ -1030,15 +1030,15 @@ def DiffMaps(self, H, K, b, tol=0.1):
return g, m, a, Z, eigenvalues
- def ISDEGeneration(
+ def ISDEGeneration( # noqa: N802
self,
n_mc=5,
- tol_PCA2=1e-5,
+ tol_PCA2=1e-5, # noqa: N803
tol=0.02,
max_iter=50,
seed_num=None,
):
- """The construction of a nonlinear Ito Stochastic Differential Equation (ISDE) to generate realizations of random variable H"""
+ """The construction of a nonlinear Ito Stochastic Differential Equation (ISDE) to generate realizations of random variable H""" # noqa: D400, D401
if seed_num:
np.random.seed(seed_num)
# constraints
@@ -1078,14 +1078,14 @@ def ISDEGeneration(
while (
iteration < max_iter
and self.errors[iteration] > tol * self.errors[0]
- and (increasing_iterations < 3)
+ and (increasing_iterations < 3) # noqa: PLR2004
):
self.logfile.write_msg(
msg=f'PLoM.ISDEGeneration: running iteration {iteration + 1}.',
msg_type='RUNNING',
msg_level=0,
)
- Hnewvalues, nu_lambda, x_, x_2 = plom.generator(
+ Hnewvalues, nu_lambda, x_, x_2 = plom.generator( # noqa: N806
self.Z,
self.Y,
self.a,
@@ -1150,7 +1150,7 @@ def ISDEGeneration(
else:
nu_init = np.random.normal(size=(int(self.nu), int(self.N)))
self.Y = nu_init.dot(self.a)
- Hnewvalues, nu_lambda, x_, x_2 = plom.generator(
+ Hnewvalues, nu_lambda, x_, x_2 = plom.generator( # noqa: N806
self.Z,
self.Y,
self.a,
@@ -1184,11 +1184,11 @@ def ISDEGeneration(
# unscale
self.Xnew = np.diag(self.alpha).dot(self.Xnew) + self.x_min
- def export_results(self, data_list=[], file_format_list=['csv']):
+ def export_results(self, data_list=[], file_format_list=['csv']): # noqa: B006
"""Exporting results by the data names
- data_list: list of data names
- file_format_list: list of output formats
- """
+ """ # noqa: D205, D400, D401
avail_name_list = self.dbserver.get_name_list()
if not data_list:
# print available data names
@@ -1216,18 +1216,18 @@ def export_results(self, data_list=[], file_format_list=['csv']):
else:
try:
ff_i = file_format_list[tag]
- except:
+ except: # noqa: E722
ff_i = file_format_list[-1]
ex_flag = self.dbserver.export(
data_name=data_i, file_format=ff_i
)
- if type(ex_flag) == int and ex_flat == 1:
+ if type(ex_flag) == int and ex_flag == 1: # noqa: E721
self.logfile.write_msg(
msg=f'PLoM.export_results: {data_i} is not found and skipped.',
msg_type='WARNING',
msg_level=0,
)
- elif type(ex_flag) == int and ex_flag == 2:
+ elif type(ex_flag) == int and ex_flag == 2: # noqa: E721, PLR2004
self.logfile.write_msg(
msg=f'PLoM.export_results: {ff_i} is not supported yet.',
msg_type='ERROR',
@@ -1302,4 +1302,4 @@ def PostProcess():
blue_patch = mpatches.Patch(color='blue', label='X')
plt.legend(handles=[red_patch, blue_patch])
plt.show()
- """
+ """ # noqa: E101
diff --git a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py
index a22e38d52..40295f405 100644
--- a/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py
+++ b/modules/performUQ/SimCenterUQ/PLoM/PLoM_library.py
@@ -1,8 +1,8 @@
-# JGA
+# JGA # noqa: N999, D100
# from matplotlib import pyplot as plt
import os
import platform
-from ctypes import *
+from ctypes import * # noqa: F403
from math import exp, log, pi, sqrt
from sys import platform as pltm
@@ -11,43 +11,43 @@
from scipy import integrate
if pltm == 'linux' or pltm == 'linux2':
- c_lib = CDLL(
- os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ c_lib = CDLL( # noqa: F405
+ os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'lib/linux/PLoM_C_library.so',
)
)
elif pltm == 'darwin':
if platform.processor() == 'arm':
- c_lib = CDLL(
- os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ c_lib = CDLL( # noqa: F405
+ os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'lib/macOS_m1/PLoM_C_library.so',
)
)
else:
- c_lib = CDLL(
- os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ c_lib = CDLL( # noqa: F405
+ os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'lib/macOS/PLoM_C_library.so',
)
)
elif pltm == 'win32':
- c_lib = CDLL(
- os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ c_lib = CDLL( # noqa: F405
+ os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'lib/win/PLoM_C_library.so',
)
)
-c_lib.rho.restype = c_double
+c_lib.rho.restype = c_double # noqa: F405
c_lib.rho.argtypes = [
np.ctypeslib.ndpointer(dtype=np.float64),
np.ctypeslib.ndpointer(dtype=np.float64),
- c_int,
- c_int,
- c_double,
- c_double,
+ c_int, # noqa: F405
+ c_int, # noqa: F405
+ c_double, # noqa: F405
+ c_double, # noqa: F405
]
c_lib.gradient_rho.restype = np.ctypeslib.ndpointer(dtype=np.float64)
@@ -55,20 +55,20 @@
np.ctypeslib.ndpointer(dtype=np.float64),
np.ctypeslib.ndpointer(dtype=np.float64),
np.ctypeslib.ndpointer(dtype=np.float64),
- c_int,
- c_int,
- c_double,
- c_double,
+ c_int, # noqa: F405
+ c_int, # noqa: F405
+ c_double, # noqa: F405
+ c_double, # noqa: F405
]
-def rhoctypes(y, eta, nu, N, s_v, hat_s_v):
+def rhoctypes(y, eta, nu, N, s_v, hat_s_v): # noqa: N803, D103
return c_lib.rho(
np.array(y, np.float64), np.array(eta, np.float64), nu, N, s_v, hat_s_v
)
-def scaling(x):
+def scaling(x): # noqa: D103
n = x.shape[0]
alpha = np.zeros(n)
x_min = np.zeros((n, 1))
@@ -84,7 +84,7 @@ def scaling(x):
return x_scaled, alpha, x_min
-def gradient_rhoctypes(gradient, y, eta, nu, N, s_v, hat_s_v):
+def gradient_rhoctypes(gradient, y, eta, nu, N, s_v, hat_s_v): # noqa: N803, D103
return c_lib.gradient_rho(
np.array(gradient, np.float64),
np.array(y, np.float64),
@@ -99,20 +99,20 @@ def gradient_rhoctypes(gradient, y, eta, nu, N, s_v, hat_s_v):
def kernel(x, y, epsilon):
""">>> kernel(np.array([1,0]), np.array([1,0]), 0.5)
1.0
- """
+ """ # noqa: D205, D400
dist = np.linalg.norm(x - y) ** 2
k = np.exp(-dist / (4 * epsilon))
- return k
+ return k # noqa: RET504
-def K(eta, epsilon):
+def K(eta, epsilon): # noqa: N802
""">>> K((np.array([[1,1],[1,1]])), 3)
(array([[1., 1.],
[1., 1.]]), array([[2., 0.],
[0., 2.]]))
- """
- N = eta.shape[1]
- K = np.zeros((N, N))
+ """ # noqa: D205, D400
+ N = eta.shape[1] # noqa: N806
+ K = np.zeros((N, N)) # noqa: N806
b = np.zeros((N, N))
for i in range(N):
row_sum = 0
@@ -127,11 +127,11 @@ def K(eta, epsilon):
return K, b
-def g(K, b):
+def g(K, b): # noqa: N803
""">>> g((np.array([[1,0.5],[0.5,1]])), np.array([[1.5, 0.], [0., 1.5]]))
(array([[ 0.57735027, -0.57735027],
[ 0.57735027, 0.57735027]]), array([1. , 0.33333333]))
- """
+ """ # noqa: D205, D400
invb = np.diag(1 / np.diag(b))
inv_sqrt_b = np.sqrt(invb)
xi = np.linalg.eigh(inv_sqrt_b.dot(K).dot(inv_sqrt_b))
@@ -148,7 +148,7 @@ def g(K, b):
def m(eigenvalues, tol=0.1):
""">>> m(np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.05, 0.025]))
11
- """
+ """ # noqa: D205, D400
i = 2
m = 0
while i < len(eigenvalues) and m == 0:
@@ -165,7 +165,7 @@ def mean(x):
array([[1. ],
[0.5],
[3. ]])
- """
+ """ # noqa: D205, D400
dim = x.shape[0]
x_mean = np.zeros((dim, 1))
for i in range(dim):
@@ -178,24 +178,24 @@ def covariance(x):
array([[0. , 0. , 0. ],
[0. , 0.5, 1. ],
[0. , 1. , 2. ]])
- """
+ """ # noqa: D205, D400
dim = x.shape[0]
- N = x.shape[1]
- C = np.zeros((dim, dim))
+ N = x.shape[1] # noqa: N806
+ C = np.zeros((dim, dim)) # noqa: N806
x_mean = mean(x)
for i in range(N):
- C = C + (np.resize(x[:, i], x_mean.shape) - x_mean).dot(
+ C = C + (np.resize(x[:, i], x_mean.shape) - x_mean).dot( # noqa: N806
np.transpose(np.resize(x[:, i], x_mean.shape) - x_mean)
)
return C / (N - 1)
-def PCA(x, tol):
+def PCA(x, tol): # noqa: N802
""">>> PCA(np.array([[1,1],[0,1],[2,4]]), 0.1)
(array([[-0.70710678, 0.70710678]]), array([1.58113883]), array([[-1.13483031e-17],
[ 4.47213595e-01],
[ 8.94427191e-01]]))
- """
+ """ # noqa: D205, D400
x_mean = mean(x)
(phi, mu, v) = np.linalg.svd(x - x_mean)
mu = mu / sqrt(len(x[0]) - 1)
@@ -237,9 +237,9 @@ def PCA(x, tol):
def parameters_kde(eta):
""">>> parameters_kde(np.array([[1,1],[0,1],[2,4]]))
(0.8773066621237415, 0.13452737030512696, 0.7785858648409519)
- """
+ """ # noqa: D205, D400
nu = eta.shape[0]
- N = eta.shape[1]
+ N = eta.shape[1] # noqa: N806
s_v = (4 / (N * (2 + nu))) ** (1 / (nu + 4)) # (4/(N*(2+nu)))**(1/(nu+4))
hat_s_v = s_v / sqrt(s_v**2 + ((N - 1) / N))
c_v = 1 / (sqrt(2 * pi) * hat_s_v) ** nu
@@ -249,10 +249,10 @@ def parameters_kde(eta):
def kde(y, eta, s_v=None, c_v=None, hat_s_v=None):
""">>> kde(np.array([[1, 2, 3]]), np.array([[1,1],[0,1],[2,4]]))
0.01940049487135241
- """
+ """ # noqa: D205, D400
nu = eta.shape[0]
- N = eta.shape[1]
- if s_v == None or c_v == None or hat_s_v == None:
+ N = eta.shape[1] # noqa: N806
+ if s_v is None or c_v is None or hat_s_v is None:
s_v, c_v, hat_s_v = parameters_kde(eta)
return c_v * rhoctypes(
np.resize(y, (y.shape[0] * y.shape[1], 1)),
@@ -265,12 +265,12 @@ def kde(y, eta, s_v=None, c_v=None, hat_s_v=None):
# taking only independent constraints
-def PCA2(C_h_hat_eta, beta, tol):
+def PCA2(C_h_hat_eta, beta, tol): # noqa: N802, N803
""">>> PCA2(np.array([[1. , 1. , 1. ], [1. , 4.5, 1.5 ], [1. , 1.5 , 2. ]]), np.array([10, 1, 2]), 0.1)
(array([-4.53648062, 5.2236145 ]), array([[-0.28104828, 0.42570005],
[-0.85525695, -0.51768266],
[-0.43537043, 0.74214832]]))
- """
+ """ # noqa: D205, D400
(lambda_c, psi) = np.linalg.eig(
C_h_hat_eta
) # eigenvalue decomposition as the dimensions are not so big
@@ -291,28 +291,28 @@ def PCA2(C_h_hat_eta, beta, tol):
return b_c, psi
-def h_c(eta, g_c, phi, mu, psi, x_mean):
+def h_c(eta, g_c, phi, mu, psi, x_mean): # noqa: D103
return np.transpose(psi).dot(g_c(x_mean + phi.dot(np.diag(mu)).dot(eta)))
-def gradient_gamma(b_c, eta_lambda, g_c, phi, mu, psi, x_mean):
+def gradient_gamma(b_c, eta_lambda, g_c, phi, mu, psi, x_mean): # noqa: D103
return (b_c) - mean(
h_c(eta_lambda, g_c, phi, mu, psi, x_mean)
) # the mean is the empirical expectation
-def hessian_gamma(eta_lambda, psi, g_c, phi, mu, x_mean):
+def hessian_gamma(eta_lambda, psi, g_c, phi, mu, x_mean): # noqa: D103
return covariance(h_c(eta_lambda, g_c, phi, mu, psi, x_mean))
-def solve_inverse(matrix):
+def solve_inverse(matrix): # noqa: D103
if matrix.shape[0] != matrix.shape[1]:
return Logfile().write_msg(
msg='PLoM: solve_inverse non-square matrix.',
msg_type='ERROR',
msg_level=0,
)
- else:
+ else: # noqa: RET505
inverse = np.zeros(matrix.shape)
for j in range(matrix.shape[1]):
unit = np.zeros(matrix.shape[1])
@@ -322,7 +322,7 @@ def solve_inverse(matrix):
return inverse
-def generator(
+def generator( # noqa: D103, PLR0913
z_init,
y_init,
a,
@@ -337,19 +337,19 @@ def generator(
psi=0,
lambda_i=0,
g_c=0,
- D_x_g_c=0,
+ D_x_g_c=0, # noqa: N803
seed_num=None,
):
if seed_num:
np.random.seed(seed_num)
delta_t = 2 * pi * hat_s_v / 20
- print('delta t: ', delta_t)
+ print('delta t: ', delta_t) # noqa: T201
f_0 = 1.5
l_0 = 10 # 200
- M_0 = 10 # 20
+ M_0 = 10 # 20 # noqa: N806
beta = f_0 * delta_t / 4
nu = z_init.shape[0]
- N = a.shape[0]
+ N = a.shape[0] # noqa: N806
eta_lambda = np.zeros((nu, (n_mc + 1) * N))
nu_lambda = np.zeros((nu, (n_mc + 1) * N))
n = x_mean.shape[0]
@@ -359,12 +359,12 @@ def generator(
y_l = y_init
eta_lambda[:, 0:N] = z_init.dot(np.transpose(g))
nu_lambda[:, 0:N] = y_init.dot(np.transpose(g))
- for i in range(l_0):
+ for i in range(l_0): # noqa: B007
z_l_half = z_l + delta_t * 0.5 * y_l
w_l_1 = np.random.normal(scale=sqrt(delta_t), size=(nu, N)).dot(
a
) # wiener process
- L_l_half = L(
+ L_l_half = L( # noqa: N806
z_l_half.dot(np.transpose(g)),
g_c,
x_mean,
@@ -384,12 +384,12 @@ def generator(
)
z_l = z_l_half + delta_t * 0.5 * y_l_1
y_l = y_l_1
- for l in range(M_0, M_0 * (n_mc + 1)):
+ for l in range(M_0, M_0 * (n_mc + 1)): # noqa: E741
z_l_half = z_l + delta_t * 0.5 * y_l
w_l_1 = np.random.normal(scale=sqrt(delta_t), size=(nu, N)).dot(
a
) # wiener process
- L_l_half = L(
+ L_l_half = L( # noqa: N806
z_l_half.dot(np.transpose(g)),
g_c,
x_mean,
@@ -432,15 +432,15 @@ def generator(
return eta_lambda[:, N:], nu_lambda[:, N:], x_, x_2
-def ac(sig):
+def ac(sig): # noqa: D103
sig = sig - np.mean(sig)
sft = np.fft.rfft(np.concatenate((sig, 0 * sig)))
return np.fft.irfft(np.conj(sft) * sft)
-def L(
+def L( # noqa: N802, D103
y,
- g_c,
+ g_c, # noqa: ARG001
x_mean,
eta,
s_v,
@@ -449,12 +449,12 @@ def L(
phi,
psi,
lambda_i,
- D_x_g_c,
+ D_x_g_c, # noqa: N803
): # gradient of the potential
nu = eta.shape[0]
- N = eta.shape[1]
- L = np.zeros((nu, N))
- for l in range(N):
+ N = eta.shape[1] # noqa: N806
+ L = np.zeros((nu, N)) # noqa: N806
+ for l in range(N): # noqa: E741
yl = np.resize(y[:, l], (len(y[:, l]), 1))
rho_ = rhoctypes(
yl, np.resize(np.transpose(eta), (nu * N, 1)), nu, N, s_v, hat_s_v
@@ -468,7 +468,7 @@ def L(
else:
# not constraints and no D_x_g_c
grad_g_c = np.zeros((x_mean.shape[0], 1))
- if rho_ < 1e-250:
+ if rho_ < 1e-250: # noqa: PLR2004
closest = 1e30
for i in range(N):
if closest > np.linalg.norm(
@@ -492,7 +492,7 @@ def L(
)
else:
- array_pointer = cast(
+ array_pointer = cast( # noqa: F405
gradient_rhoctypes(
np.zeros((nu, 1)),
yl,
@@ -502,7 +502,7 @@ def L(
s_v,
hat_s_v,
),
- POINTER(c_double * nu),
+ POINTER(c_double * nu), # noqa: F405
)
gradient_rho = np.frombuffer(array_pointer.contents)
# KZ L[:,l] = np.resize(1e250*gradient_rho/rho_,(nu))\
@@ -520,19 +520,19 @@ def L(
return L
-def err(gradient, b_c):
+def err(gradient, b_c): # noqa: D103
return np.linalg.norm(gradient) / np.linalg.norm(b_c)
-def gamma(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean, b_c):
+def gamma(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean, b_c): # noqa: D103
return np.transpose(lambda_i).dot(b_c) + log(
inv_c_0(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean)
)
-def func(x, y, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean, lambda_i):
+def func(x, y, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean, lambda_i): # noqa: D103
nu = eta.shape[0]
- N = eta.shape[1]
+ N = eta.shape[1] # noqa: N806
return rhoctypes(
np.array([x, y]),
np.resize(np.transpose(eta), (nu * N, 1)),
@@ -547,11 +547,11 @@ def func(x, y, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean, lambda_i):
)
-def gaussian_bell(x, y):
+def gaussian_bell(x, y): # noqa: D103
return exp(-(x**2 + y**2) / 2) / (2 * pi)
-def inv_c_0(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean):
+def inv_c_0(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean): # noqa: D103
c, error = integrate.dblquad(
func,
-3,
@@ -563,19 +563,19 @@ def inv_c_0(lambda_i, eta, s_v, hat_s_v, g_c, phi, mu, psi, x_mean):
return c # integral mathematica
-def expo(y):
+def expo(y): # noqa: D103
meann = np.array([[0], [0]])
- sigma = np.array([[1, 0], [0, 1]])
+ sigma = np.array([[1, 0], [0, 1]]) # noqa: F841
f = exp(-0.5 * np.transpose(y - meann).dot(y - meann))
- return f
+ return f # noqa: RET504
-def gradient_expo(y):
+def gradient_expo(y): # noqa: D103
meann = np.array([[0], [0]])
- sigma = np.array([[1, 0], [0, 1]])
+ sigma = np.array([[1, 0], [0, 1]]) # noqa: F841
f = np.zeros((2, 1))
f = -(y - meann) * exp(-0.5 * np.transpose(y - meann).dot(y - meann))
- return f
+ return f # noqa: RET504
if __name__ == '__main__':
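The bandwidth trio (s_v, hat_s_v, c_v) computed by parameters_kde above is the multivariate Silverman-type rule that every KDE call in this library relies on. The standalone lines below reproduce its doctest for a data matrix with nu = 3 dimensions and N = 2 samples; only the intermediate names are local to this sketch.

    from math import pi, sqrt

    import numpy as np

    eta = np.array([[1, 1], [0, 1], [2, 4]])
    nu, N = eta.shape                             # 3 dimensions, 2 samples
    s_v = (4 / (N * (2 + nu))) ** (1 / (nu + 4))  # Silverman-type bandwidth
    hat_s_v = s_v / sqrt(s_v**2 + (N - 1) / N)    # shrunk bandwidth
    c_v = 1 / (sqrt(2 * pi) * hat_s_v) ** nu      # Gaussian normalization
    print(s_v, c_v, hat_s_v)  # ~(0.87731, 0.13453, 0.77859), as in the doctest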
diff --git a/modules/performUQ/SimCenterUQ/PLoM/__init__.py b/modules/performUQ/SimCenterUQ/PLoM/__init__.py
index 2ca7a991a..a7a9739b2 100644
--- a/modules/performUQ/SimCenterUQ/PLoM/__init__.py
+++ b/modules/performUQ/SimCenterUQ/PLoM/__init__.py
@@ -1,4 +1,4 @@
-import os
+import os # noqa: N999, D104
import sys
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
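PTH100 and PTH120 on the line above flag os.path.abspath and os.path.dirname. If the suppressions were ever traded for an actual fix, the pathlib spelling would be the one-liner below (a sketch, not part of this commit):

    import sys
    from pathlib import Path

    sys.path.insert(0, str(Path(__file__).resolve().parent))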
diff --git a/modules/performUQ/SimCenterUQ/PLoM/general.py b/modules/performUQ/SimCenterUQ/PLoM/general.py
index 74965a704..19b6d05d7 100644
--- a/modules/performUQ/SimCenterUQ/PLoM/general.py
+++ b/modules/performUQ/SimCenterUQ/PLoM/general.py
@@ -1,4 +1,4 @@
-# Constants, variables, and methods that are commonly used
+# Constants, variables, and methods that are commonly used # noqa: D100
import os
from collections import Counter
@@ -28,7 +28,7 @@
]
ITEM_LIST_ISDEGENE = ['Errors', 'X_new']
ITEM_LIST = (
- ['basic']
+ ['basic'] # noqa: RUF005
+ ['constraints_file']
+ ['X0', 'N', 'n']
+ ITEM_LIST_DATANORM
@@ -75,16 +75,16 @@
}
-class Logfile:
- def __init__(self, logfile_dir='./', logfile_name='plom.log', screen_msg=True):
+class Logfile: # noqa: D101
+ def __init__(self, logfile_dir='./', logfile_name='plom.log', screen_msg=True): # noqa: FBT002
"""Initializing the logfile
- logfile_dir: default is the same path of the PLoM package
- logfile_name: default is the "plom.log"
- screen_msg: default is to show message on screen
- """
+ """ # noqa: D205, D400, D401
self.logfile_dir = logfile_dir
self.logfile_name = logfile_name
- self.logfile_path = os.path.join(self.logfile_dir, self.logfile_name)
+ self.logfile_path = os.path.join(self.logfile_dir, self.logfile_name) # noqa: PTH118
self.screen_msg = screen_msg
# start the log
self.write_msg(msg='--NEW LOG STARTING FROM THIS LINE--', mode='w')
@@ -94,35 +94,35 @@ def write_msg(self, msg='', msg_type='RUNNING', msg_level=0, mode='a'):
- msg: the message
- msg_type: the type of message 'RUNNING', 'WARNING', 'ERROR'
- msg_level: how many indent tags
- """
+ """ # noqa: D205, D400, D401
indent_tabs = ''.join(['\t'] * msg_level)
- decorated_msg = f'{datetime.utcnow()} {indent_tabs} {msg_type}-MSG {msg} '
+ decorated_msg = f'{datetime.utcnow()} {indent_tabs} {msg_type}-MSG {msg} ' # noqa: DTZ003
if self.screen_msg:
- print(decorated_msg)
- with open(self.logfile_path, mode) as f:
+ print(decorated_msg) # noqa: T201
+ with open(self.logfile_path, mode) as f: # noqa: PTH123
f.write('\n' + decorated_msg)
def delete_logfile(self):
- """Deleting the log file"""
- if os.path.exists(self.logfile_path):
- os.remove(self.logfile_path)
+ """Deleting the log file""" # noqa: D400, D401
+ if os.path.exists(self.logfile_path): # noqa: PTH110
+ os.remove(self.logfile_path) # noqa: PTH107
else:
- print(f'The logfile {self.logfile_path} does not exist.')
+ print(f'The logfile {self.logfile_path} does not exist.') # noqa: T201
-class DBServer:
+class DBServer: # noqa: D101
def __init__(self, db_dir='./', db_name='plom.h5'):
"""Initializing the database
- db_dir: default is the same path of the PLoM package
- db_name: default is "plom.h5"
- """
+ """ # noqa: D205, D400, D401
self.db_dir = db_dir
self.db_name = db_name
- self.db_path = os.path.join(self.db_dir, self.db_name)
- if os.path.exists(self.db_path):
+ self.db_path = os.path.join(self.db_dir, self.db_name) # noqa: PTH118
+ if os.path.exists(self.db_path): # noqa: PTH110
# deleting the old database
- os.remove(self.db_path)
- self.init_time = datetime.utcnow()
+ os.remove(self.db_path) # noqa: PTH107
+ self.init_time = datetime.utcnow() # noqa: DTZ003
self.item_name_list = []
self.basic()
self.dir_export = self._create_export_dir()
@@ -130,11 +130,11 @@ def __init__(self, db_dir='./', db_name='plom.h5'):
self._item_adds = ITEM_ADDS
def basic(self):
- """Writing basic info"""
- df = pd.DataFrame.from_dict(
+ """Writing basic info""" # noqa: D400, D401
+ df = pd.DataFrame.from_dict( # noqa: PD901
{
'InitializedTime': [self.init_time],
- 'LastEditedTime': [datetime.utcnow()],
+ 'LastEditedTime': [datetime.utcnow()], # noqa: DTZ003
'DBName': [self.db_name],
},
dtype=str,
@@ -145,71 +145,71 @@ def basic(self):
self.add_item(item=[''], data_type='ConstraintsFile')
def _create_export_dir(self):
- """Creating a export folder"""
- dir_export = os.path.join(self.db_dir, 'DataOut')
+ """Creating a export folder""" # noqa: D400, D401
+ dir_export = os.path.join(self.db_dir, 'DataOut') # noqa: PTH118
try:
- os.makedirs(dir_export, exist_ok=True)
- return dir_export
- except:
+ os.makedirs(dir_export, exist_ok=True) # noqa: PTH103
+ return dir_export # noqa: TRY300
+ except: # noqa: E722
return None
def get_item_adds(self):
- """Returning the full list of data items"""
+ """Returning the full list of data items""" # noqa: D400, D401
return self._item_adds
def add_item(
self,
item_name=None,
col_names=None,
- item=[],
+ item=[], # noqa: B006
data_shape=None,
data_type='Data',
):
- """Adding a new data item into database"""
+ """Adding a new data item into database""" # noqa: D400
if data_type == 'Data':
if item.size > 1:
- df = pd.DataFrame(item, columns=col_names)
+ df = pd.DataFrame(item, columns=col_names) # noqa: PD901
dshape = pd.DataFrame(data_shape, columns=['DS_' + item_name])
else:
if col_names is None:
col_names = item_name
- df = pd.DataFrame.from_dict({col_names: item.tolist()})
+ df = pd.DataFrame.from_dict({col_names: item.tolist()}) # noqa: PD901
dshape = pd.DataFrame.from_dict({'DS_' + col_names: (1,)})
- if item_name is not None:
+ if item_name is not None: # noqa: RET503
store = pd.HDFStore(self.db_path, 'a')
# data item
df.to_hdf(store, item_name, mode='a')
# data shape
dshape.to_hdf(store, 'DS_' + item_name, mode='a')
- store.close()
+ store.close() # noqa: RET503
elif data_type == 'ConstraintsFile':
# constraints filename
cf = pd.DataFrame.from_dict({'ConstraintsFile': item}, dtype=str)
store = pd.HDFStore(self.db_path, 'a')
cf.to_hdf(store, 'constraints_file', mode='a')
- store.close()
+ store.close() # noqa: RET503
else:
# Not supported data_type
return False
- def get_item(self, item_name=None, table_like=False, data_type='Data'):
- """Getting a specific data item"""
- if data_type == 'Data':
- if item_name is not None:
+ def get_item(self, item_name=None, table_like=False, data_type='Data'): # noqa: FBT002
+ """Getting a specific data item""" # noqa: D400, D401
+ if data_type == 'Data': # noqa: RET503
+ if item_name is not None: # noqa: RET503
store = pd.HDFStore(self.db_path, 'r')
try:
item = store.get(item_name)
item_shape = tuple(
[
x[0]
- for x in self.get_item_shape(
+ for x in self.get_item_shape( # noqa: PD011
item_name=item_name
).values.tolist()
]
)
if not table_like:
item = item.to_numpy().reshape(item_shape)
- except:
+ except: # noqa: E722
item = None
finally:
store.close()
@@ -219,42 +219,42 @@ def get_item(self, item_name=None, table_like=False, data_type='Data'):
store = pd.HDFStore(self.db_path, 'r')
try:
item = store.get('/constraints_file')
- except:
+ except: # noqa: E722
item = None
finally:
store.close()
- return item.values.tolist()[0][0]
+ return item.values.tolist()[0][0] # noqa: PD011
def remove_item(self, item_name=None):
- """Removing an item"""
+ """Removing an item""" # noqa: D400, D401
if item_name is not None:
store = pd.HDFStore(self.db_path, 'r')
try:
store.remove(item_name)
- except:
- item = None
+ except: # noqa: E722
+ item = None # noqa: F841
finally:
store.close()
def get_item_shape(self, item_name=None):
- """Getting the shape of a specific data item"""
- if item_name is not None:
+ """Getting the shape of a specific data item""" # noqa: D400, D401
+ if item_name is not None: # noqa: RET503
store = pd.HDFStore(self.db_path, 'r')
try:
item_shape = store.get('DS_' + item_name)
- except:
+ except: # noqa: E722
item_shape = None
store.close()
return item_shape
def get_name_list(self):
- """Returning the keys of the database"""
+ """Returning the keys of the database""" # noqa: D400, D401
store = pd.HDFStore(self.db_path, 'r')
try:
name_list = store.keys()
- except:
+ except: # noqa: E722
name_list = []
store.close()
return name_list
@@ -263,23 +263,23 @@ def export(self, data_name=None, filename=None, file_format='csv'):
"""Exporting the specific data item
- data_name: data tag
- format: data format
- """
+ """ # noqa: D205, D400, D401
d = self.get_item(item_name=data_name[1:], table_like=True)
if d is None:
return 1
if filename is None:
- filename = os.path.join(
+ filename = os.path.join( # noqa: PTH118
self.dir_export, str(data_name).replace('/', '') + '.' + file_format
)
else:
- filename = os.path.join(
+ filename = os.path.join( # noqa: PTH118
self.dir_export, filename.split('.')[0] + '.' + file_format
)
- if file_format == 'csv' or 'txt':
+ if file_format in ('csv', 'txt'):
d.to_csv(filename, header=True, index=True)
elif file_format == 'json':
- with open(filename, 'w', encoding='utf-8') as f:
- json.dump(d, f)
+ with open(filename, 'w', encoding='utf-8') as f: # noqa: PTH123
+ json.dump(d, f) # noqa: F821
else:
return 2
return filename
@@ -288,12 +288,12 @@ def export(self, data_name=None, filename=None, file_format='csv'):
class Task:
"""This is a class for managering an individual task in
the PLoM running process
- """
+ """ # noqa: D205, D400, D404
def __init__(self, task_name=None):
"""Initialization
- task_name: name of the task
- """
+ """ # noqa: D205, D400, D401
self.task_name = task_name # task name
self.pre_task = None # previous task
self.next_task = None # next task
@@ -304,7 +304,7 @@ def __init__(self, task_name=None):
def refresh_status(self):
"""Refreshing the current status of the task
If any of the previous tasks is not completed, the current task is also not reliable
- """
+ """ # noqa: D205, D400, D401
# check the previous task if any
if self.pre_task:
if not self.pre_task.refresh_status():
@@ -327,18 +327,18 @@ def refresh_status(self):
class TaskList:
"""This is a class for managering a set of tasks
in a specific order
- """
+ """ # noqa: D205, D400, D404
def __init__(self):
self.head_task = None # first task
self.tail_task = None # last task
self.status = False # status
- def add_task(self, new_task=None):
+ def add_task(self, new_task=None): # noqa: D102
if new_task is None:
self.head_task = None
return
- elif self.head_task is None:
+ elif self.head_task is None: # noqa: RET505
# first task
self.head_task = new_task
self.tail_task = new_task
@@ -349,8 +349,8 @@ def add_task(self, new_task=None):
self.tail_task = new_task
def refresh_status(self):
- """Refreshing the tasks' status"""
- if self.head_task:
+ """Refreshing the tasks' status""" # noqa: D400, D401
+ if self.head_task: # noqa: RET503
cur_task = self.head_task
if not cur_task.status:
self.status = False
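DBServer above is a thin wrapper over pandas' HDFStore, and the F821 on json.dump in export() appears to indicate that general.py never imports json, so the JSON branch would raise NameError as written. A minimal sketch of the HDF5 round trip behind add_item and get_item; it needs pandas' optional tables dependency, and the file and key names are illustrative.

    import pandas as pd

    store = pd.HDFStore('plom_demo.h5', 'a')
    df = pd.DataFrame({'X0': [1.0, 2.0, 3.0]})
    df.to_hdf(store, key='X0', mode='a')  # the add_item write path
    back = store.get('X0')                # the get_item read path
    store.close()
    assert back.equals(df)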
diff --git a/modules/performUQ/SimCenterUQ/SimCenterUQ.py b/modules/performUQ/SimCenterUQ/SimCenterUQ.py
index 2b53a67e1..e68f646dd 100644
--- a/modules/performUQ/SimCenterUQ/SimCenterUQ.py
+++ b/modules/performUQ/SimCenterUQ/SimCenterUQ.py
@@ -1,11 +1,11 @@
-# written: UQ team @ SimCenter
+# written: UQ team @ SimCenter # noqa: INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -18,7 +18,7 @@
from pathlib import Path
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--workflowInput')
@@ -28,12 +28,12 @@ def main(args):
args, unknowns = parser.parse_known_args()
- inputFile = args.workflowInput
- runType = args.runType
- workflowDriver = args.driverFile
- outputFile = args.workflowOutput
+ inputFile = args.workflowInput # noqa: N806
+ runType = args.runType # noqa: N806
+ workflowDriver = args.driverFile # noqa: N806
+ outputFile = args.workflowOutput # noqa: N806, F841
- with open(inputFile, encoding='utf-8') as f:
+ with open(inputFile, encoding='utf-8') as f: # noqa: PTH123
data = json.load(f)
if runType == 'runningLocal':
@@ -46,41 +46,41 @@ def main(args):
surrogate = 'surrogateBuild.py'
plom = 'runPLoM.py' # KZ: main script of PLoM
# natafExe = os.path.join('nataf_gsa','nataf_gsa')
- natafExe = 'nataf_gsa'
- osType = 'Linux'
- workflowDriver1 = 'workflowDriver1'
+ natafExe = 'nataf_gsa' # noqa: N806
+ osType = 'Linux' # noqa: N806
+ workflowDriver1 = 'workflowDriver1' # noqa: N806
python = 'python3'
else:
surrogate = 'surrogateBuild.py'
plom = 'runPLoM.py' # KZ: main script of PLoM
# natafExe = os.path.join('nataf_gsa','nataf_gsa.exe')
- natafExe = 'nataf_gsa.exe'
- workflowDriver = workflowDriver + '.bat'
- workflowDriver1 = 'workflowDriver1.bat'
- osType = 'Windows'
+ natafExe = 'nataf_gsa.exe' # noqa: N806
+ workflowDriver = workflowDriver + '.bat' # noqa: N806
+ workflowDriver1 = 'workflowDriver1.bat' # noqa: N806, F841
+ osType = 'Windows' # noqa: N806
python = 'python'
- cwd = os.getcwd()
+ cwd = os.getcwd() # noqa: PTH109
workdir_main = str(Path(cwd).parents[0])
- print('CWD: ' + cwd)
- print('work_dir: ' + workdir_main)
+ print('CWD: ' + cwd) # noqa: T201
+ print('work_dir: ' + workdir_main) # noqa: T201
# open the input json file
- with open(inputFile, encoding='utf-8') as data_file:
+ with open(inputFile, encoding='utf-8') as data_file: # noqa: PTH123
data = json.load(data_file)
uq_data = data['UQ']
- myScriptDir = os.path.dirname(os.path.realpath(__file__))
+ myScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
- if os.path.exists(workflowDriver):
- os.chmod(workflowDriver, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH)
+ if os.path.exists(workflowDriver): # noqa: PTH110
+ os.chmod(workflowDriver, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH) # noqa: PTH101
- st = os.stat(workflowDriver)
- os.chmod(workflowDriver, st.st_mode | stat.S_IEXEC)
+ st = os.stat(workflowDriver) # noqa: PTH116
+ os.chmod(workflowDriver, st.st_mode | stat.S_IEXEC) # noqa: PTH101
else:
- print(workflowDriver + ' not found.')
+ print(workflowDriver + ' not found.') # noqa: T201
# change dir to the main working dir for the structure
os.chdir('../')
@@ -96,17 +96,17 @@ def main(args):
"""
if uq_data['uqType'] == 'Train GP Surrogate Model':
- simCenterUQCommand = f'"{python}" "{myScriptDir}/{surrogate}" "{workdir_main}" {inputFile} {workflowDriver} {osType} {runType} 1> logFileSimUQ.txt 2>&1'
+ simCenterUQCommand = f'"{python}" "{myScriptDir}/{surrogate}" "{workdir_main}" {inputFile} {workflowDriver} {osType} {runType} 1> logFileSimUQ.txt 2>&1' # noqa: N806
elif (
uq_data['uqType'] == 'Sensitivity Analysis'
or uq_data['uqType'] == 'Forward Propagation'
):
- simCenterUQCommand = f'"{myScriptDir}/{natafExe}" "{workdir_main}" {inputFile} {workflowDriver} {osType} {runType} 1> logFileSimUQ.txt 2>&1'
+ simCenterUQCommand = f'"{myScriptDir}/{natafExe}" "{workdir_main}" {inputFile} {workflowDriver} {osType} {runType} 1> logFileSimUQ.txt 2>&1' # noqa: N806
# KZ: training calling runPLoM.py to launch the model training
elif uq_data['uqType'] == 'PLoM Model':
- simCenterUQCommand = '"{}" "{}" "{}" {} {} {} {}'.format(
+ simCenterUQCommand = '"{}" "{}" "{}" {} {} {} {}'.format( # noqa: N806
python,
- os.path.join(myScriptDir, plom).replace('\\', '/'),
+ os.path.join(myScriptDir, plom).replace('\\', '/'), # noqa: PTH118
workdir_main.replace('\\', '/'),
inputFile,
workflowDriver,
@@ -134,20 +134,20 @@ def main(args):
# elif uq_data['uqType'] == 'PLoM Model':
# simCenterUQCommand = '"{}" "{}" "{}" {} {} {} {}'.format(python, os.path.join(myScriptDir,plom).replace('\\','/'),workdir_main.replace('\\','/'),inputFile,workflowDriver,osType,runType)
- print('running SimCenterUQ: ', simCenterUQCommand)
+ print('running SimCenterUQ: ', simCenterUQCommand) # noqa: T201
# subprocess.Popen(simCenterUQCommand, shell=True).wait()
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
simCenterUQCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
- print('DONE SUCESS')
+ print('DONE SUCCESS') # noqa: T201
except subprocess.CalledProcessError as e:
- result = e.output
- returncode = e.returncode
- print('DONE FAIL')
+ result = e.output # noqa: F841
+ returncode = e.returncode # noqa: F841
+ print('DONE FAIL') # noqa: T201
if __name__ == '__main__':
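S602 above flags the check_output call with shell=True. A hedged sketch of the argument-list form that avoids spawning a shell; the paths are placeholders, and the `1> logFileSimUQ.txt 2>&1` redirection then has to move into Python:

    import subprocess

    cmd = [
        'python',
        '/path/to/surrogateBuild.py',  # placeholder script path
        '/path/to/workdir_main',       # placeholder working directory
        'input.json', 'driver', 'Windows', 'runningLocal',
    ]
    with open('logFileSimUQ.txt', 'w') as log:
        returncode = subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT)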
diff --git a/modules/performUQ/SimCenterUQ/UQengine.py b/modules/performUQ/SimCenterUQ/UQengine.py
index 6cd6e89e1..74ca792e6 100644
--- a/modules/performUQ/SimCenterUQ/UQengine.py
+++ b/modules/performUQ/SimCenterUQ/UQengine.py
@@ -1,4 +1,4 @@
-import glob
+import glob # noqa: INP001, D100
import json
import os
import shutil
@@ -11,8 +11,8 @@
import pandas as pd
-class UQengine:
- def __init__(self, inputArgs):
+class UQengine: # noqa: D101
+ def __init__(self, inputArgs): # noqa: N803
self.work_dir = inputArgs[1].replace(os.sep, '/')
self.inputFile = inputArgs[2]
self.workflowDriver = inputArgs[3]
@@ -21,81 +21,81 @@ def __init__(self, inputArgs):
self.IM_names = [] # used in EEUQ
- jsonPath = self.inputFile
- if not os.path.isabs(jsonPath):
+ jsonPath = self.inputFile # noqa: N806
+ if not os.path.isabs(jsonPath): # noqa: PTH117
# for quoFEM
- jsonPath = self.work_dir + '/templatedir/' + self.inputFile
+ jsonPath = self.work_dir + '/templatedir/' + self.inputFile # noqa: N806
# temporary for EEUQ....
- jsonDir, jsonName = os.path.split(jsonPath)
- eeJsonPath = os.path.join(jsonDir, 'sc_' + jsonName)
+ jsonDir, jsonName = os.path.split(jsonPath) # noqa: N806
+ eeJsonPath = os.path.join(jsonDir, 'sc_' + jsonName) # noqa: PTH118, N806
- if os.path.exists(eeJsonPath):
+ if os.path.exists(eeJsonPath): # noqa: PTH110
self.inputFile = eeJsonPath
- jsonPath = eeJsonPath
+ jsonPath = eeJsonPath # noqa: N806
- with open(jsonPath) as f:
- dakotaJson = json.load(f)
+ with open(jsonPath) as f: # noqa: PTH123
+ dakotaJson = json.load(f) # noqa: N806, F841
# self.workflowDriver = "workflow_driver"
# if self.os_type.lower().startswith('win'):
# self.workflowDriver = "workflow_driver.bat"
- def cleanup_workdir(self):
+ def cleanup_workdir(self): # noqa: C901, D102
# if template dir already contains results.out, give an error
# Cleanup working directory if needed
- del_paths = glob.glob(os.path.join(self.work_dir, 'workdir*'))
+ del_paths = glob.glob(os.path.join(self.work_dir, 'workdir*')) # noqa: PTH118, PTH207
for del_path in del_paths:
# change permission for workflow_driver.bat
- self.workflowDriver_path = os.path.join(del_path, self.workflowDriver)
+ self.workflowDriver_path = os.path.join(del_path, self.workflowDriver) # noqa: PTH118
# if os.path.exists(self.workflowDriver_path):
# os.chmod(self.workflowDriver_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
# Change permission
for root, dirs, files in os.walk(del_path):
for d in dirs:
- os.chmod(
- os.path.join(root, d),
- stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
+ os.chmod( # noqa: PTH101
+ os.path.join(root, d), # noqa: PTH118
+ stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO, # noqa: S103
)
for f in files:
- os.chmod(
- os.path.join(root, f),
- stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
+ os.chmod( # noqa: PTH101
+ os.path.join(root, f), # noqa: PTH118
+ stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO, # noqa: S103
)
try:
shutil.rmtree(del_path)
- except Exception as msg:
+ except Exception as msg: # noqa: BLE001
self.exit(str(msg))
- del_outputs = glob.glob(os.path.join(self.work_dir, '*out'))
+ del_outputs = glob.glob(os.path.join(self.work_dir, '*out')) # noqa: PTH118, PTH207
for del_out in del_outputs:
- os.remove(del_out)
+ os.remove(del_out) # noqa: PTH107
- del_pkls = glob.glob(os.path.join(self.work_dir, '*pkl'))
+ del_pkls = glob.glob(os.path.join(self.work_dir, '*pkl')) # noqa: PTH118, PTH207
for del_pkl in del_pkls:
- os.remove(del_pkl)
+ os.remove(del_pkl) # noqa: PTH107
try:
- del_errs = glob.glob(os.path.join(self.work_dir, '*err'))
+ del_errs = glob.glob(os.path.join(self.work_dir, '*err')) # noqa: PTH118, PTH207
for del_err in del_errs:
- os.remove(del_err)
- except:
+ os.remove(del_err) # noqa: PTH107
+ except: # noqa: S110, E722
pass
- if glob.glob(os.path.join(self.work_dir, 'templatedir', 'results.out')):
+ if glob.glob(os.path.join(self.work_dir, 'templatedir', 'results.out')): # noqa: PTH118, PTH207
try:
- os.remove(os.path.join(self.work_dir, 'templatedir', 'results.out'))
- except:
+ os.remove(os.path.join(self.work_dir, 'templatedir', 'results.out')) # noqa: PTH107, PTH118
+ except: # noqa: E722
msg = 'Your main folder (where the main FEM script is located) already contains results.out. To prevent any confusion, please delete this file first'
self.exit(msg)
- print('working directory cleared')
+ print('working directory cleared') # noqa: T201
- def set_FEM(self, rv_name, do_parallel, y_dim, t_init, t_thr):
+ def set_FEM(self, rv_name, do_parallel, y_dim, t_init, t_thr): # noqa: N802, D102
self.rv_name = rv_name
self.do_parallel = do_parallel
self.y_dim = y_dim
@@ -103,21 +103,21 @@ def set_FEM(self, rv_name, do_parallel, y_dim, t_init, t_thr):
self.t_thr = t_thr
self.total_sim_time = 0
- def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]):
+ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]): # noqa: B006, C901, N802, N803, D102
if runIdx == -1:
# dummy run
return X, np.zeros((0, self.y_dim)), id_sim
- workflowDriver = self.workflowDriver
+ workflowDriver = self.workflowDriver # noqa: N806
#
# serial run
#
- X = np.atleast_2d(X)
+ X = np.atleast_2d(X) # noqa: N806
nsamp = X.shape[0]
if not self.do_parallel:
- Y = np.zeros((nsamp, self.y_dim))
+ Y = np.zeros((nsamp, self.y_dim)) # noqa: N806
for ns in range(nsamp):
- Y_tmp, id_sim_current = run_FEM(
+ Y_tmp, id_sim_current = run_FEM( # noqa: N806
X[ns, :],
id_sim + ns,
self.rv_name,
@@ -126,22 +126,22 @@ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]):
runIdx,
)
if Y_tmp.shape[0] != self.y_dim:
- msg = f'model output in sample {ns} contains {Y_tmp.shape[0]} value(s) while the number of QoIs specified is {y_dim}'
+ msg = f'model output in sample {ns} contains {Y_tmp.shape[0]} value(s) while the number of QoIs specified is {self.y_dim}'
self.exit(msg)
Y[ns, :] = Y_tmp
if time.time() - self.t_init > self.t_thr:
- X = X[:ns, :]
- Y = Y[:ns, :]
+ X = X[:ns, :] # noqa: N806
+ Y = Y[:ns, :] # noqa: N806
break
- Nsim = id_sim_current - id_sim + 1
+ Nsim = id_sim_current - id_sim + 1 # noqa: N806
#
# parallel run
#
if self.do_parallel:
- print(f'Running {nsamp} simulations in parallel')
+ print(f'Running {nsamp} simulations in parallel') # noqa: T201
tmp = time.time()
iterables = (
(
@@ -156,17 +156,17 @@ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]):
)
try:
result_objs = list(self.pool.starmap(run_FEM, iterables))
- print(f'Simulation time = {time.time() - tmp} s')
+ print(f'Simulation time = {time.time() - tmp} s') # noqa: T201
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
self.pool.shutdown()
- except Exception:
+ except Exception: # noqa: BLE001
sys.exit()
- Nsim = len(list(result_objs))
- Y = np.zeros((Nsim, self.y_dim))
- for val, id in result_objs:
+ Nsim = len(list(result_objs)) # noqa: N806
+ Y = np.zeros((Nsim, self.y_dim)) # noqa: N806
+ for val, id in result_objs: # noqa: A001
if isinstance(val, str):
self.exit(val)
elif val.shape[0]:
@@ -175,15 +175,15 @@ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]):
self.exit(msg)
if np.isnan(np.sum(val)):
- Nsim = id - id_sim
- X = X[:Nsim, :]
- Y = Y[:Nsim, :]
+ Nsim = id - id_sim # noqa: N806
+ X = X[:Nsim, :] # noqa: N806
+ Y = Y[:Nsim, :] # noqa: N806
else:
Y[id - id_sim, :] = val
if len(alterInput) > 0:
idx = alterInput[0]
- X = np.hstack([X[:, :idx], X[:, idx + 1 :]])
+ X = np.hstack([X[:, :idx], X[:, idx + 1 :]]) # noqa: N806
# IM_vals = self.compute_IM(id_sim+1, id_sim + Nsim)
# IM_list = list(map(str, IM_vals))[1:]
@@ -197,38 +197,38 @@ def run_FEM_batch(self, X, id_sim, runIdx=0, alterInput=[]):
# In case EEUQ
#
- IM_vals = self.compute_IM(id_sim + 1, id_sim + Nsim)
+ IM_vals = self.compute_IM(id_sim + 1, id_sim + Nsim) # noqa: N806
if IM_vals is None:
- X = X.astype(np.double)
+ X = X.astype(np.double) # noqa: N806
else:
self.IM_names = list(map(str, IM_vals))[1:]
- X_new = np.hstack([X, IM_vals.to_numpy()[:, 1:]])
- X = X_new.astype(np.double)
+ X_new = np.hstack([X, IM_vals.to_numpy()[:, 1:]]) # noqa: N806
+ X = X_new.astype(np.double) # noqa: N806
return X, Y, id_sim + Nsim
- def compute_IM(self, i_begin, i_end):
+ def compute_IM(self, i_begin, i_end): # noqa: N802, D102
workdir_list = [
- os.path.join(self.work_dir, f'workdir.{int(i)}')
+ os.path.join(self.work_dir, f'workdir.{int(i)}') # noqa: PTH118
for i in range(i_begin, i_end + 1)
]
# intensity measure app
- computeIM = os.path.join(
- os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ computeIM = os.path.join( # noqa: PTH118, N806
+ os.path.dirname( # noqa: PTH120
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
),
'createEVENT',
'groundMotionIM',
'IntensityMeasureComputer.py',
)
- pythonEXE = sys.executable
+ pythonEXE = sys.executable # noqa: N806
# compute IMs
for cur_workdir in workdir_list:
os.chdir(cur_workdir)
- if os.path.exists('EVENT.json') and os.path.exists('AIM.json'):
- os.system(
+ if os.path.exists('EVENT.json') and os.path.exists('AIM.json'): # noqa: PTH110
+ os.system( # noqa: S605
f'{pythonEXE} {computeIM} --filenameAIM AIM.json --filenameEVENT EVENT.json --filenameIM IM.json --geoMeanVar'
)
os.chdir(self.work_dir)
@@ -236,14 +236,14 @@ def compute_IM(self, i_begin, i_end):
# collect IMs from different workdirs
for i, cur_workdir in enumerate(workdir_list):
cur_id = int(cur_workdir.split('.')[-1])
- if os.path.exists(os.path.join(cur_workdir, 'IM.csv')):
- print(f'IM.csv found in wordir.{cur_id}')
+ if os.path.exists(os.path.join(cur_workdir, 'IM.csv')): # noqa: PTH110, PTH118
+ print(f'IM.csv found in workdir.{cur_id}') # noqa: T201
tmp1 = pd.read_csv(
- os.path.join(cur_workdir, 'IM.csv'),
+ os.path.join(cur_workdir, 'IM.csv'), # noqa: PTH118
index_col=None,
)
if tmp1.empty:
- print(f'IM.csv in wordir.{cur_id} is empty.')
+ print(f'IM.csv in workdir.{cur_id} is empty.') # noqa: T201
return None
tmp2 = pd.DataFrame(
{'%eval_id': [cur_id for x in range(len(tmp1.index))]}
@@ -254,17 +254,17 @@ def compute_IM(self, i_begin, i_end):
tmp3 = pd.concat([tmp2, tmp1], axis=1)
im_collector = pd.concat([im_collector, tmp3])
else:
- print(f'IM.csv NOT found in wordir.{cur_id}')
+ print(f'IM.csv NOT found in workdir.{cur_id}') # noqa: T201
return None
im_collector = im_collector.sort_values(by=['%eval_id'])
- return im_collector
+ return im_collector # noqa: RET504
# im_collector.to_csv('IM.csv', index=False)
- def readJson(self):
+ def readJson(self): # noqa: N802, D102
pass
- def make_pool(
+ def make_pool( # noqa: D102
self,
):
if self.run_type.lower() == 'runninglocal':
@@ -285,19 +285,19 @@ def make_pool(
# Someplace to write down error messages
#
- def create_errLog(self):
+ def create_errLog(self): # noqa: N802, D102
# self.errfile = open(os.path.join(self.work_dir, "dakota.err"), "a")
pass
- def exit(self, msg):
- print(msg, file=sys.stderr)
- print(msg)
+ def exit(self, msg): # noqa: D102
+ print(msg, file=sys.stderr) # noqa: T201
+ print(msg) # noqa: T201
# sys.stderr.write(msg)
# self.errfile.write(msg)
# self.errfile.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
- def terminate_errLog(self):
+ def terminate_errLog(self): # noqa: N802, D102
# self.errfile.close()
pass
@@ -306,15 +306,15 @@ def terminate_errLog(self):
#
-def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0):
+def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0): # noqa: C901, N802, N803, D103
if runIdx == 0:
- templatedirFolder = '/templatedir'
- workdirFolder = '/workdir.' + str(id_sim + 1)
+ templatedirFolder = '/templatedir' # noqa: N806
+ workdirFolder = '/workdir.' + str(id_sim + 1) # noqa: N806
else:
- templatedirFolder = '/templatedir.' + str(runIdx)
- workdirFolder = '/workdir.' + str(runIdx) + '.' + str(id_sim + 1)
+ templatedirFolder = '/templatedir.' + str(runIdx) # noqa: N806
+ workdirFolder = '/workdir.' + str(runIdx) + '.' + str(id_sim + 1) # noqa: N806
- X = np.atleast_2d(X)
+ X = np.atleast_2d(X) # noqa: N806
x_dim = X.shape[1]
if X.shape[0] > 1:
@@ -327,11 +327,11 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0):
current_dir_i = work_dir + workdirFolder
try:
shutil.copytree(work_dir + templatedirFolder, current_dir_i)
- except Exception:
+ except Exception: # noqa: BLE001
try:
shutil.copytree(work_dir + templatedirFolder, current_dir_i)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
msg = 'Error running FEM: ' + str(ex)
return msg, id_sim
@@ -339,16 +339,16 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0):
# (2) write param.in file
#
- outF = open(current_dir_i + '/params.in', 'w')
+ outF = open(current_dir_i + '/params.in', 'w') # noqa: SIM115, PTH123, N806
outF.write(f'{x_dim}\n')
for i in range(x_dim):
outF.write(f'{rv_name[i]} {X[0, i]}\n')
outF.close()
if runIdx == 0:
- print(f'RUNNING FEM: working directory {id_sim + 1} created')
+ print(f'RUNNING FEM: working directory {id_sim + 1} created') # noqa: T201
else:
- print(f'RUNNING FEM: working directory {runIdx}-{id_sim + 1} created')
+ print(f'RUNNING FEM: working directory {runIdx}-{id_sim + 1} created') # noqa: T201
#
# (3) run workflow_driver.bat
@@ -363,7 +363,7 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0):
# stderr=subprocess.STDOUT,
# ) # subprocess.check_call(workflow_run_command, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
# => to end gracefully
- returnCode = subprocess.call(
+ returnCode = subprocess.call( # noqa: S602, N806, F841
workflow_run_command,
shell=True,
stdout=subprocess.DEVNULL,
@@ -374,47 +374,47 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver, runIdx=0):
# (4) reading results
#
- if glob.glob('results.out'):
+ if glob.glob('results.out'): # noqa: PTH207
g = np.loadtxt('results.out').flatten()
else:
msg = 'Error running FEM: results.out missing at ' + current_dir_i
- if glob.glob('ops.out'):
- with open('ops.out') as text_file:
- error_FEM = text_file.read()
+ if glob.glob('ops.out'): # noqa: PTH207
+ with open('ops.out') as text_file: # noqa: PTH123
+ error_FEM = text_file.read() # noqa: N806
- startingCharId = error_FEM.lower().find('error')
+ startingCharId = error_FEM.lower().find('error') # noqa: N806
if startingCharId > 0:
- startingCharId = max(0, startingCharId - 20)
- endingID = max(len(error_FEM), startingCharId + 200)
+ startingCharId = max(0, startingCharId - 20) # noqa: N806
+ endingID = min(len(error_FEM), startingCharId + 200) # noqa: N806
errmsg = error_FEM[startingCharId:endingID]
errmsg = errmsg.split(' ', 1)[1]
errmsg = errmsg[0 : errmsg.rfind(' ')]
msg += '\n'
msg += 'your FEM model says...\n'
msg += '........\n' + errmsg + '\n........ \n'
- msg += 'to read more, see ' + os.path.join(os.getcwd(), 'ops.out')
+ msg += 'to read more, see ' + os.path.join(os.getcwd(), 'ops.out') # noqa: PTH109, PTH118
return msg, id_sim
if g.shape[0] == 0:
msg = 'Error running FEM: results.out is empty'
- if glob.glob('ops.out'):
- with open('ops.out') as text_file:
- error_FEM = text_file.read()
+ if glob.glob('ops.out'): # noqa: PTH207
+ with open('ops.out') as text_file: # noqa: PTH123
+ error_FEM = text_file.read() # noqa: N806
- startingCharId = error_FEM.lower().find('error')
+ startingCharId = error_FEM.lower().find('error') # noqa: N806
if startingCharId > 0:
- startingCharId = max(0, startingCharId - 20)
- endingID = max(len(error_FEM), startingCharId + 200)
+ startingCharId = max(0, startingCharId - 20) # noqa: N806
+ endingID = min(len(error_FEM), startingCharId + 200) # noqa: N806
errmsg = error_FEM[startingCharId:endingID]
errmsg = errmsg.split(' ', 1)[1]
errmsg = errmsg[0 : errmsg.rfind(' ')]
msg += '\n'
msg += 'your FEM model says...\n'
msg += '........\n' + errmsg + '\n........ \n'
- msg += 'to read more, see ' + os.path.join(os.getcwd(), 'ops.out')
+ msg += 'to read more, see ' + os.path.join(os.getcwd(), 'ops.out') # noqa: PTH109, PTH118
return msg, id_sim
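run_FEM above writes each sample's params.in as a count line followed by one `name value` line per random variable, using a bare open() (hence SIM115). The context-manager form of the same write, with illustrative values:

    rv_name = ['E', 'P']  # illustrative random-variable names
    x = [205.0, 25.0]     # illustrative sampled values
    with open('params.in', 'w') as out_f:
        out_f.write(f'{len(rv_name)}\n')
        for name, value in zip(rv_name, x):
            out_f.write(f'{name} {value}\n')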
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/conanfile.py b/modules/performUQ/SimCenterUQ/nataf_gsa/conanfile.py
index cc1c84047..f51f6d845 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/conanfile.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/conanfile.py
@@ -1,16 +1,16 @@
-import os
+import os # noqa: INP001, D100
from conans import CMake, ConanFile
-class simCenterBackendApps(ConanFile):
+class simCenterBackendApps(ConanFile): # noqa: D101
name = 'nataf_gsa_cpp_mpi'
version = '1.0.0'
description = 'Software for creating nataf_gsa'
license = 'BSD 2-Clause'
- settings = {'os': None, 'build_type': None, 'compiler': None, 'arch': ['x86_64']}
- options = {'shared': [True, False]}
- default_options = {
+ settings = {'os': None, 'build_type': None, 'compiler': None, 'arch': ['x86_64']} # noqa: RUF012
+ options = {'shared': [True, False]} # noqa: RUF012
+ default_options = { # noqa: RUF012
'mkl-static:threaded': False,
'ipp-static:simcenter_backend': True,
}
@@ -28,34 +28,34 @@ class simCenterBackendApps(ConanFile):
_build_subfolder = 'build_subfolder'
# Set short paths for Windows
short_paths = True
- scm = {
+ scm = { # noqa: RUF012
'type': 'git', # Use "type": "svn", if local repo is managed using SVN
'subfolder': _source_subfolder,
'url': 'auto',
'revision': 'auto',
}
- def configure(self):
+ def configure(self): # noqa: D102
self.options.shared = False
if self.settings.os == 'Windows':
self.options['libcurl'].with_winssl = True
self.options['libcurl'].with_openssl = False
- def configure_cmake(self):
+ def configure_cmake(self): # noqa: D102
cmake = CMake(self)
cmake.configure(source_folder=self._source_subfolder)
return cmake
- def build(self):
+ def build(self): # noqa: D102
cmake = self.configure_cmake()
cmake.build()
- def package(self):
+ def package(self): # noqa: D102
self.copy(pattern='LICENSE', dst='licenses', src=self._source_subfolder)
cmake = self.configure_cmake()
cmake.install()
self.copy('*', dst='bin', src=self._source_subfolder + '/applications')
- def package_info(self):
- self.env_info.PATH.append(os.path.join(self.package_folder, 'bin'))
+ def package_info(self): # noqa: D102
+ self.env_info.PATH.append(os.path.join(self.package_folder, 'bin')) # noqa: PTH118
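PTH118 marks os.path.join throughout this commit. Both spellings below build the package's bin path, the second being the pathlib form the rule asks for (a sketch with a placeholder folder):

    import os
    from pathlib import Path

    package_folder = '/opt/pkg'  # placeholder
    legacy = os.path.join(package_folder, 'bin')
    modern = str(Path(package_folder) / 'bin')
    # same location; separator normalization may differ by platform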
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/postprocess.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/postprocess.py
index ab4f22b82..5425b54ed 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/postprocess.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/postprocess.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python # noqa: EXE001, D100
# written: fmk, adamzs 01/18
@@ -6,47 +6,47 @@
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
import sys
-def process_results(inputArgs):
+def process_results(inputArgs): # noqa: N803, D103
#
# process output file "node.out" for nodal displacements
#
- with open('node.out') as inFile:
+ with open('node.out') as inFile: # noqa: PTH123, N806
line = inFile.readline()
line = inFile.readline()
line = inFile.readline()
displ = line.split()
- numNode = len(displ)
+ numNode = len(displ) # noqa: N806
- inFile.close
+ inFile.close()
# now process the input args and write the results file
- outFile = open('results.out', 'w')
+ outFile = open('results.out', 'w') # noqa: SIM115, PTH123, N806
# note for now assuming no ERROR in user data
for i in inputArgs:
- theList = i.split('_')
+ theList = i.split('_') # noqa: N806
- if len(theList) == 4:
+ if len(theList) == 4: # noqa: PLR2004
dof = int(theList[3])
else:
dof = 1
if theList[0] == 'Node':
- nodeTag = int(theList[1])
+ nodeTag = int(theList[1]) # noqa: N806
if nodeTag > 0 and nodeTag <= numNode:
if theList[2] == 'Disp':
- nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1]))
+ nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1])) # noqa: N806
outFile.write(str(nodeDisp))
outFile.write(' ')
else:
@@ -56,13 +56,13 @@ def process_results(inputArgs):
else:
outFile.write('0. ')
- outFile.close
+ outFile.close()
if __name__ == '__main__':
n = len(sys.argv)
responses = []
for i in range(1, n):
- responses.append(sys.argv[i])
+ responses.append(sys.argv[i]) # noqa: PERF401
process_results(responses)
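The B018 hits in this file mark a real defect: `outFile.close` and `inFile.close` reference the bound method without calling it, so the bare-opened outFile was never explicitly closed (inFile is at least covered by its with-block). The minimal fix is to call close(); the context-manager form below removes the hazard entirely (sketch with illustrative values):

    results = ['0.123', '0.456']  # illustrative nodal displacements
    with open('results.out', 'w') as out_file:
        out_file.write(' '.join(results))
    # out_file is guaranteed closed here, even if the write raises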
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/writeParam.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/writeParam.py
index 0b8bb4152..e43b0dc79 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/writeParam.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/EE_Test1/templatedir/writeParam.py
@@ -1,32 +1,32 @@
-import os
+import os # noqa: INP001, D100
import sys
-def main():
- paramsIn = sys.argv[1]
- paramsOut = sys.argv[2]
+def main(): # noqa: D103
+ paramsIn = sys.argv[1] # noqa: N806
+ paramsOut = sys.argv[2] # noqa: N806
- if not os.path.isfile(paramsIn):
- print(f'Input param file {paramsIn} does not exist. Exiting...')
+ if not os.path.isfile(paramsIn): # noqa: PTH113
+ print(f'Input param file {paramsIn} does not exist. Exiting...') # noqa: T201
sys.exit()
- outFILE = open(paramsOut, 'w')
+ outFILE = open(paramsOut, 'w') # noqa: SIM115, PTH123, N806
- with open(paramsIn) as inFILE:
+ with open(paramsIn) as inFILE: # noqa: PTH123, N806
line = inFILE.readline()
- splitLine = line.split()
- numRV = int(splitLine[3])
+ splitLine = line.split() # noqa: N806
+ numRV = int(splitLine[3]) # noqa: N806
print(numRV, file=outFILE)
- for i in range(numRV):
+ for i in range(numRV): # noqa: B007
line = inFILE.readline()
- splitLine = line.split()
- nameRV = splitLine[1]
- valueRV = splitLine[3]
+ splitLine = line.split() # noqa: N806
+ nameRV = splitLine[1] # noqa: N806
+ valueRV = splitLine[3] # noqa: N806
print(f'{nameRV} {valueRV}', file=outFILE)
- outFILE.close
- inFILE.close
+ outFILE.close()
+ inFILE.close()
if __name__ == '__main__':
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test1/templatedir/TrussPost.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test1/templatedir/TrussPost.py
index 37a92451a..7388a0878 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test1/templatedir/TrussPost.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test1/templatedir/TrussPost.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python # noqa: EXE001, D100
# written: fmk, adamzs 01/18
@@ -6,45 +6,45 @@
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
import sys
-def process_results(inputArgs):
+def process_results(inputArgs): # noqa: N803, D103
#
# process output file "node.out" for nodal displacements
#
- with open('node.out') as inFile:
+ with open('node.out') as inFile: # noqa: PTH123, N806
line = inFile.readline()
displ = line.split()
- numNode = len(displ)
+ numNode = len(displ) # noqa: N806
- inFile.close
+ inFile.close()
# now process the input args and write the results file
- outFile = open('results.out', 'w')
+ outFile = open('results.out', 'w') # noqa: SIM115, PTH123, N806
# note for now assuming no ERROR in user data
for i in inputArgs:
- theList = i.split('_')
+ theList = i.split('_') # noqa: N806
- if len(theList) == 4:
+ if len(theList) == 4: # noqa: PLR2004
dof = int(theList[3])
else:
dof = 1
if theList[0] == 'Node':
- nodeTag = int(theList[1])
+ nodeTag = int(theList[1]) # noqa: N806
if nodeTag > 0 and nodeTag <= numNode:
if theList[2] == 'Disp':
- nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1]))
+ nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1])) # noqa: N806
outFile.write(str(nodeDisp))
outFile.write(' ')
else:
@@ -54,13 +54,13 @@ def process_results(inputArgs):
else:
outFile.write('0. ')
- outFile.close
+ outFile.close()
if __name__ == '__main__':
n = len(sys.argv)
responses = []
for i in range(1, n):
- responses.append(sys.argv[i])
+ responses.append(sys.argv[i]) # noqa: PERF401
process_results(responses)
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussModel.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussModel.py
index 679ef4289..179c59a31 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussModel.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussModel.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python # noqa: EXE001, D100
# written: fmk, adamzs 06/20
# units kN & mm
@@ -9,10 +9,10 @@
ops.wipe()
-from TrussParams import *
+from TrussParams import * # noqa: E402, F403
-def run_analysis():
+def run_analysis(): # noqa: D103
# build the model
ops.model('basic', '-ndm', 2, '-ndf', 2)
@@ -27,23 +27,23 @@ def run_analysis():
ops.fix(1, 1, 1)
ops.fix(4, 0, 1)
- ops.uniaxialMaterial('Elastic', 1, E)
+ ops.uniaxialMaterial('Elastic', 1, E) # noqa: F405
- ops.element('truss', 1, 1, 2, Ao, 1)
- ops.element('truss', 2, 2, 3, Ao, 1)
- ops.element('truss', 3, 3, 4, Ao, 1)
- ops.element('truss', 4, 1, 5, Au, 1)
- ops.element('truss', 5, 5, 6, Au, 1)
- ops.element('truss', 6, 6, 4, Au, 1)
- ops.element('truss', 7, 2, 5, Ao, 1)
- ops.element('truss', 8, 3, 6, Ao, 1)
- ops.element('truss', 9, 5, 3, Ao, 1)
+ ops.element('truss', 1, 1, 2, Ao, 1) # noqa: F405
+ ops.element('truss', 2, 2, 3, Ao, 1) # noqa: F405
+ ops.element('truss', 3, 3, 4, Ao, 1) # noqa: F405
+ ops.element('truss', 4, 1, 5, Au, 1) # noqa: F405
+ ops.element('truss', 5, 5, 6, Au, 1) # noqa: F405
+ ops.element('truss', 6, 6, 4, Au, 1) # noqa: F405
+ ops.element('truss', 7, 2, 5, Ao, 1) # noqa: F405
+ ops.element('truss', 8, 3, 6, Ao, 1) # noqa: F405
+ ops.element('truss', 9, 5, 3, Ao, 1) # noqa: F405
ops.timeSeries('Linear', 1)
ops.pattern('Plain', 1, 1)
- ops.load(2, 0, -P)
- ops.load(3, 0, -P)
+ ops.load(2, 0, -P) # noqa: F405
+ ops.load(3, 0, -P) # noqa: F405
# build and perform the analysis
@@ -59,24 +59,24 @@ def run_analysis():
[ops.nodeDisp(node_i, dof_j) for dof_j in [1, 2]] for node_i in range(1, 7)
]
- return node_disp
+ return node_disp # noqa: RET504
-def process_results(responses, node_disp):
+def process_results(responses, node_disp): # noqa: D103
# identify the responses of interest
nodes = [int(r.split('_')[1]) for r in responses]
- dofs = [int(r.split('_')[3]) if len(r.split('_')) > 2 else 1 for r in responses]
+ dofs = [int(r.split('_')[3]) if len(r.split('_')) > 2 else 1 for r in responses] # noqa: PLR2004
# get the results
results = []
for n_i, d_i in zip(nodes, dofs):
try:
results.append(str(node_disp[n_i - 1][d_i - 1]))
- except:
+ except: # noqa: PERF203, E722
results.append('0.0')
# save the results
- with open('results.out', 'w') as f:
+ with open('results.out', 'w') as f: # noqa: PTH123
f.write(' '.join(results))
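
The E402/F403/F405 suppressions in TrussModel.py all stem from `from TrussParams import *` appearing after `ops.wipe()`. Hoisting an explicit import removes all three at once. A sketch, assuming `ops` is the usual openseespy alias and that TrussParams only defines these four names:

    import openseespy.opensees as ops
    from TrussParams import Ao, Au, E, P  # explicit names resolve F405

    ops.wipe()  # E402 disappears once imports precede executable code
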
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussParams.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussParams.py
index 686d8b2ee..22c5b1c9e 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussParams.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test2/templatedir/TrussParams.py
@@ -1,4 +1,4 @@
-# set some parameters
+# set some parameters # noqa: INP001, D100
E = 'RV.E'
P = 'RV.P'
diff --git a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test3/templatedir/TrussPost.py b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test3/templatedir/TrussPost.py
index 37a92451a..7388a0878 100644
--- a/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test3/templatedir/TrussPost.py
+++ b/modules/performUQ/SimCenterUQ/nataf_gsa/test/Examples/Test3/templatedir/TrussPost.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python # noqa: EXE001, D100
# written: fmk, adamzs 01/18
@@ -6,45 +6,45 @@
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
import sys
-def process_results(inputArgs):
+def process_results(inputArgs): # noqa: N803, D103
#
# process output file "node.out" for nodal displacements
#
- with open('node.out') as inFile:
+ with open('node.out') as inFile: # noqa: PTH123, N806
line = inFile.readline()
displ = line.split()
- numNode = len(displ)
+ numNode = len(displ) # noqa: N806
- inFile.close
+ inFile.close # noqa: B018
# now process the input args and write the results file
- outFile = open('results.out', 'w')
+ outFile = open('results.out', 'w') # noqa: SIM115, PTH123, N806
# note for now assuming no ERROR in user data
for i in inputArgs:
- theList = i.split('_')
+ theList = i.split('_') # noqa: N806
- if len(theList) == 4:
+ if len(theList) == 4: # noqa: PLR2004
dof = int(theList[3])
else:
dof = 1
if theList[0] == 'Node':
- nodeTag = int(theList[1])
+ nodeTag = int(theList[1]) # noqa: N806
if nodeTag > 0 and nodeTag <= numNode:
if theList[2] == 'Disp':
- nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1]))
+ nodeDisp = abs(float(displ[((nodeTag - 1) * 2) + dof - 1])) # noqa: N806
outFile.write(str(nodeDisp))
outFile.write(' ')
else:
@@ -54,13 +54,13 @@ def process_results(inputArgs):
else:
outFile.write('0. ')
- outFile.close
+ outFile.close # noqa: B018
if __name__ == '__main__':
n = len(sys.argv)
responses = []
for i in range(1, n):
- responses.append(sys.argv[i])
+ responses.append(sys.argv[i]) # noqa: PERF401
process_results(responses)
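
The A001/F821 pairs in these files guard a Python 2 compatibility shim: on Python 3 the branch is dead, yet ruff still sees `xrange`/`basestring` as undefined and `range = ...` as shadowing a builtin. If Python 2 support can be dropped, which the rest of the patch suggests, the whole shim reduces to a single line. A sketch:

    # Python 3 only: xrange/basestring are gone, range needs no rebinding
    string_types = str
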
diff --git a/modules/performUQ/SimCenterUQ/notBeingUsed/SimCenterUQFEM.py b/modules/performUQ/SimCenterUQ/notBeingUsed/SimCenterUQFEM.py
index 43ae9990f..115af89df 100644
--- a/modules/performUQ/SimCenterUQ/notBeingUsed/SimCenterUQFEM.py
+++ b/modules/performUQ/SimCenterUQ/notBeingUsed/SimCenterUQFEM.py
@@ -1,14 +1,14 @@
-# import functions for Python 2.X support
+# import functions for Python 2.X support # noqa: INP001, D100
import os
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import argparse
import platform
@@ -19,7 +19,7 @@
from preprocessJSON import preProcessDakota
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--workflowInput')
@@ -32,52 +32,52 @@ def main(args):
args, unknowns = parser.parse_known_args()
- inputFile = args.workflowInput
- runType = args.runType
- workflow_driver = args.driverFile
- outputFile = args.workflowOutput
- rvFiles = args.filesWithRV
- edpFiles = args.filesWithEDP
+ inputFile = args.workflowInput # noqa: N806, F841
+ runType = args.runType # noqa: N806, F841
+ workflow_driver = args.driverFile # noqa: F841
+ outputFile = args.workflowOutput # noqa: N806, F841
+ rvFiles = args.filesWithRV # noqa: N806, F841
+ edpFiles = args.filesWithEDP # noqa: N806, F841
- myScriptDir = os.path.dirname(os.path.realpath(__file__))
+ myScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
# desktop applications
if (
- uqData['samples'] is None
+ uqData['samples'] is None # noqa: F821
): # this happens with new applications, workflow to change
- print('RUNNING PREPROCESSOR\n')
- osType = platform.system()
- preprocessorCommand = f'"{myScriptDir}/preprocessDakota" {bimName} {samName} {evtName} {edpName} {simName} {driverFile} {runDakota} {osType}'
- subprocess.Popen(preprocessorCommand, shell=True).wait()
- print('DONE RUNNING PREPROCESSOR\n')
+ print('RUNNING PREPROCESSOR\n') # noqa: T201
+ osType = platform.system() # noqa: N806
+ preprocessorCommand = f'"{myScriptDir}/preprocessDakota" {bimName} {samName} {evtName} {edpName} {simName} {driverFile} {runDakota} {osType}' # noqa: N806, F821
+ subprocess.Popen(preprocessorCommand, shell=True).wait() # noqa: S602
+ print('DONE RUNNING PREPROCESSOR\n') # noqa: T201
else:
- scriptDir = os.path.dirname(os.path.realpath(__file__))
- numRVs = preProcessDakota(
- bimName,
- evtName,
- samName,
- edpName,
- simName,
- driverFile,
- runDakota,
- uqData,
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806, F841
+ numRVs = preProcessDakota( # noqa: N806, F841
+ bimName, # noqa: F821
+ evtName, # noqa: F821
+ samName, # noqa: F821
+ edpName, # noqa: F821
+ simName, # noqa: F821
+ driverFile, # noqa: F821
+ runDakota, # noqa: F821
+ uqData, # noqa: F821
)
- shutil.move(bimName, 'bim.j')
- shutil.move(evtName, 'evt.j')
- if os.path.isfile(samName):
- shutil.move(samName, 'sam.j')
- shutil.move(edpName, 'edp.j')
+ shutil.move(bimName, 'bim.j') # noqa: F821
+ shutil.move(evtName, 'evt.j') # noqa: F821
+ if os.path.isfile(samName): # noqa: PTH113, F821
+ shutil.move(samName, 'sam.j') # noqa: F821
+ shutil.move(edpName, 'edp.j') # noqa: F821
# Setting Workflow Driver Name
- workflowDriverName = 'workflow_driver'
- if (platform.system() == 'Windows') and (runDakota == 'run'):
- workflowDriverName = 'workflow_driver.bat'
+ workflowDriverName = 'workflow_driver' # noqa: N806
+ if (platform.system() == 'Windows') and (runDakota == 'run'): # noqa: F821
+ workflowDriverName = 'workflow_driver.bat' # noqa: N806
# Change permission of workflow driver
- st = os.stat(workflowDriverName)
- os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC)
+ st = os.stat(workflowDriverName) # noqa: PTH116
+ os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC) # noqa: PTH101
# copy the dakota input file to the main working dir for the structure
shutil.move('dakota.in', '../')
@@ -85,19 +85,19 @@ def main(args):
# change dir to the main working dir for the structure
os.chdir('../')
- if runDakota == 'run':
- dakotaCommand = (
+ if runDakota == 'run': # noqa: F821
+ dakotaCommand = ( # noqa: N806
'dakota -input dakota.in -output dakota.out -error dakota.err'
)
- print('running Dakota: ', dakotaCommand)
+ print('running Dakota: ', dakotaCommand) # noqa: T201
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
dakotaCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
except subprocess.CalledProcessError as e:
- result = e.output
- returncode = e.returncode
+ result = e.output # noqa: F841
+ returncode = e.returncode # noqa: F841
if __name__ == '__main__':
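
S602 marks the `shell=True` calls. Since the dakota command line is fixed, passing an argument list avoids the shell (and its injection surface) entirely. A sketch of the same invocation:

    import subprocess

    cmd = ['dakota', '-input', 'dakota.in', '-output', 'dakota.out',
           '-error', 'dakota.err']
    try:
        result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        returncode = 0
    except subprocess.CalledProcessError as e:
        result = e.output
        returncode = e.returncode
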
diff --git a/modules/performUQ/SimCenterUQ/notBeingUsed/parseSimCenterUQ.py b/modules/performUQ/SimCenterUQ/notBeingUsed/parseSimCenterUQ.py
index b3cbddfd0..c9e433127 100644
--- a/modules/performUQ/SimCenterUQ/notBeingUsed/parseSimCenterUQ.py
+++ b/modules/performUQ/SimCenterUQ/notBeingUsed/parseSimCenterUQ.py
@@ -1,11 +1,11 @@
-# written: UQ team @ SimCenter
+# written: UQ team @ SimCenter # noqa: INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -16,7 +16,7 @@
import subprocess
import sys
-inputArgs = sys.argv
+inputArgs = sys.argv # noqa: N816
workdir_main = inputArgs[1]
workdir_temp = inputArgs[2]
@@ -33,23 +33,23 @@
if sys.platform == 'darwin':
OpenSees = 'OpenSees'
surrogate = 'surrogateBuild.py'
- natafExe = 'nataf_gsa'
+ natafExe = 'nataf_gsa' # noqa: N816
Feap = 'feappv'
Dakota = 'dakota'
- plomScript = 'runPLoM.py'
+ plomScript = 'runPLoM.py' # noqa: N816
workflow_driver = 'workflow_driver'
- osType = 'Darwin'
+ osType = 'Darwin' # noqa: N816
# Windows
else:
OpenSees = 'OpenSees'
Feap = 'Feappv41.exe'
surrogate = 'surrogateBuild.py'
- natafExe = 'nataf_gsa.exe'
+ natafExe = 'nataf_gsa.exe' # noqa: N816
Dakota = 'dakota'
- plomScript = 'runPLoM.py'
+ plomScript = 'runPLoM.py' # noqa: N816
workflow_driver = 'workflow_driver.bat'
- osType = 'Windows'
+ osType = 'Windows' # noqa: N816
# Stampede @ DesignSafe, DON'T EDIT
elif run_type == 'runningRemote':
@@ -57,16 +57,16 @@
Feap = '/home1/00477/tg457427/bin/feappv'
Dakota = 'dakota'
workflow_driver = 'workflow_driver'
- osType = 'Linux'
+ osType = 'Linux' # noqa: N816
# change workdir to the templatedir
os.chdir(workdir_temp)
-cwd = os.getcwd()
+cwd = os.getcwd() # noqa: PTH109
-print(cwd)
+print(cwd) # noqa: T201
# open the dakota json file
-with open('dakota.json') as data_file:
+with open('dakota.json') as data_file: # noqa: PTH123
data = json.load(data_file)
uq_data = data['UQ_Method']
@@ -74,40 +74,40 @@
rnd_data = data['randomVariables']
my_edps = data['EDP']
-myScriptDir = os.path.dirname(os.path.realpath(__file__))
-inputFile = 'dakota.json'
+myScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
+inputFile = 'dakota.json' # noqa: N816
-osType = platform.system()
+osType = platform.system() # noqa: N816
# preprocessorCommand = '"{}/preprocessSimCenterUQ" {} {} {} {}'.format(myScriptDir, inputFile, workflow_driver, run_type, osType)
# subprocess.Popen(preprocessorCommand, shell=True).wait()
# print("DONE RUNNING PREPROCESSOR\n")
# edps = samplingData["edps"]
-numResponses = 0
-responseDescriptors = []
+numResponses = 0 # noqa: N816
+responseDescriptors = [] # noqa: N816
for edp in my_edps:
responseDescriptors.append(edp['name'])
- numResponses += 1
+ numResponses += 1 # noqa: SIM113, N816
-femProgram = fem_data['program']
-print(femProgram)
+femProgram = fem_data['program'] # noqa: N816
+print(femProgram) # noqa: T201
if run_type == 'runningLocal':
- os.chmod(workflow_driver, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH)
+ os.chmod(workflow_driver, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH) # noqa: PTH101
# command = Dakota + ' -input dakota.in -output dakota.out -error dakota.err'
# Change permission of workflow driver
-st = os.stat(workflow_driver)
-os.chmod(workflow_driver, st.st_mode | stat.S_IEXEC)
+st = os.stat(workflow_driver) # noqa: PTH116
+os.chmod(workflow_driver, st.st_mode | stat.S_IEXEC) # noqa: PTH101
# change dir to the main working dir for the structure
os.chdir('../')
-cwd = os.getcwd()
-print(cwd)
+cwd = os.getcwd() # noqa: PTH109
+print(cwd) # noqa: T201
if run_type == 'runningLocal':
# p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
@@ -122,32 +122,32 @@
if uq_data['uqType'] == 'Train GP Surrogate Model':
# simCenterUQCommand = 'python "{}/{}" {} {} {}'.format(myScriptDir,surrogate,workdir_main,osType,run_type)
- simCenterUQCommand = '"{}" "{}/{}" "{}" {} {}'.format(
+ simCenterUQCommand = '"{}" "{}/{}" "{}" {} {}'.format( # noqa: N816
data['python'], myScriptDir, surrogate, workdir_main, osType, run_type
)
elif (
uq_data['uqType'] == 'Sensitivity Analysis'
or uq_data['uqType'] == 'Forward Propagation'
):
- simCenterUQCommand = (
+ simCenterUQCommand = ( # noqa: N816
f'"{myScriptDir}/{natafExe}" "{workdir_main}" {osType} {run_type}'
)
elif uq_data['uqType'] == 'Train PLoM Model':
- simCenterUQCommand = '"{}" "{}/{}" "{}" {} {}'.format(
+ simCenterUQCommand = '"{}" "{}/{}" "{}" {} {}'.format( # noqa: N816
data['python'], myScriptDir, plomScript, workdir_main, osType, run_type
)
- print('running SimCenterUQ: ', simCenterUQCommand)
+ print('running SimCenterUQ: ', simCenterUQCommand) # noqa: T201
# subprocess.Popen(simCenterUQCommand, shell=True).wait()
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
simCenterUQCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
- print('DONE SUCESS')
+ print('DONE SUCCESS') # noqa: T201
except subprocess.CalledProcessError as e:
result = e.output
returncode = e.returncode
- print('DONE FAIL')
+ print('DONE FAIL') # noqa: T201
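
The PTH116/PTH101 pairs around the driver permissions map directly onto pathlib. A sketch of the equivalent chmod dance, reusing the file's `workflow_driver` name:

    import stat
    from pathlib import Path

    driver = Path(workflow_driver)
    driver.chmod(driver.stat().st_mode | stat.S_IEXEC)
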
diff --git a/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild.py b/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild.py
index 04a980da1..2d4a5bf54 100644
--- a/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild.py
+++ b/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild.py
@@ -1,4 +1,4 @@
-import glob
+import glob # noqa: INP001, D100
import json
import math
import os
@@ -12,7 +12,7 @@
from copy import deepcopy
import emukit.multi_fidelity as emf
-import GPy as GPy
+import GPy as GPy # noqa: PLC0414
import numpy as np
from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
from emukit.multi_fidelity.convert_lists_to_array import (
@@ -22,12 +22,12 @@
from scipy.stats import lognorm, norm
-class GpFromModel:
- def __init__(
+class GpFromModel: # noqa: D101
+ def __init__( # noqa: C901, PLR0912, PLR0915
self,
work_dir,
- inputFile,
- workflowDriver,
+ inputFile, # noqa: N803
+ workflowDriver, # noqa: N803
run_type,
os_type,
inp,
@@ -45,12 +45,12 @@ def __init__(
# From external READ JSON FILE
#
- rv_name = list()
- self.g_name = list()
+ rv_name = list() # noqa: C408
+ self.g_name = list() # noqa: C408
x_dim = 0
y_dim = 0
for rv in inp['randomVariables']:
- rv_name = rv_name + [rv['name']]
+ rv_name = rv_name + [rv['name']] # noqa: RUF005
x_dim += 1
if x_dim == 0:
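
RUF005 flags the `rv_name = rv_name + [rv['name']]` style of accumulation; `append` (or iterable unpacking) expresses it without rebuilding the list on every pass. A sketch of the same loop:

    rv_name = []
    for rv in inp['randomVariables']:
        rv_name.append(rv['name'])  # or: rv_name = [*rv_name, rv['name']]
    x_dim = len(rv_name)
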
@@ -59,11 +59,11 @@ def __init__(
for g in inp['EDP']:
if g['length'] == 1: # scalar
- self.g_name = self.g_name + [g['name']]
+ self.g_name = self.g_name + [g['name']] # noqa: RUF005
y_dim += 1
else: # vector
for nl in range(g['length']):
- self.g_name = self.g_name + ['{}_{}'.format(g['name'], nl + 1)]
+ self.g_name = self.g_name + ['{}_{}'.format(g['name'], nl + 1)] # noqa: RUF005
y_dim += 1
if y_dim == 0:
@@ -79,11 +79,11 @@ def __init__(
self.do_predictive = False
automate_doe = False
- surrogateInfo = inp['UQ_Method']['surrogateMethodInfo']
+ surrogateInfo = inp['UQ_Method']['surrogateMethodInfo'] # noqa: N806
try:
self.do_parallel = surrogateInfo['parallelExecution']
- except:
+ except: # noqa: E722
self.do_parallel = True
if self.do_parallel:
@@ -101,8 +101,8 @@ def __init__(
self.pool = MPIPoolExecutor()
self.n_processor = self.world.Get_size()
# self.n_processor =20
- print('nprocessor :')
- print(self.n_processor)
+ print('nprocessor :') # noqa: T201
+ print(self.n_processor) # noqa: T201
# self.cal_interval = 5
self.cal_interval = self.n_processor
@@ -116,8 +116,8 @@ def __init__(
do_simulation = True
self.use_existing = surrogateInfo['existingDoE']
if self.use_existing:
- self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in')
- self.outData = os.path.join(work_dir, 'templatedir/outFile.in')
+ self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in') # noqa: PTH118
+ self.outData = os.path.join(work_dir, 'templatedir/outFile.in') # noqa: PTH118
thr_count = surrogateInfo['samples'] # number of samples
if surrogateInfo['advancedOpt']:
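
Every PTH118 here is the same `os.path.join(work_dir, 'templatedir/...')` pattern; the pathlib form also composes with the PTH123 open() fixes elsewhere in the patch. A sketch:

    from pathlib import Path

    template_dir = Path(work_dir) / 'templatedir'
    inp_data = template_dir / 'inpFile.in'
    out_data = template_dir / 'outFile.in'
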
@@ -140,10 +140,10 @@ def __init__(
self.doe_method = 'None' # default
do_doe = False
# self.inpData = surrogateInfo['inpFile']
- self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in')
+ self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in') # noqa: PTH118
if not do_simulation:
# self.outData = surrogateInfo['outFile']
- self.outData = os.path.join(work_dir, 'templatedir/outFile.in')
+ self.outData = os.path.join(work_dir, 'templatedir/outFile.in') # noqa: PTH118
elif surrogateInfo['method'] == 'Import Multi-fidelity Data File':
self.do_mf = True
@@ -156,15 +156,15 @@ def __init__(
self.use_existing_hf = surrogateInfo['existingDoE_HF']
self.samples_hf = surrogateInfo['samples_HF']
if self.use_existing_hf:
- self.inpData = os.path.join(
+ self.inpData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/inpFile_HF.in'
)
- self.outData = os.path.join(
+ self.outData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/outFile_HF.in'
)
else:
- self.inpData_hf = os.path.join(work_dir, 'templatedir/inpFile_HF.in')
- self.outData_hf = os.path.join(work_dir, 'templatedir/outFile_HF.in')
+ self.inpData_hf = os.path.join(work_dir, 'templatedir/inpFile_HF.in') # noqa: PTH118
+ self.outData_hf = os.path.join(work_dir, 'templatedir/outFile_HF.in') # noqa: PTH118
self.X_hf = read_txt(self.inpData_hf, errlog)
self.Y_hf = read_txt(self.outData_hf, errlog)
if self.X_hf.shape[0] != self.Y_hf.shape[0]:
@@ -175,15 +175,15 @@ def __init__(
self.use_existing_lf = surrogateInfo['existingDoE_LF']
self.samples_lf = surrogateInfo['samples_LF']
if self.use_existing_lf:
- self.inpData = os.path.join(
+ self.inpData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/inpFile_LF.in'
)
- self.outData = os.path.join(
+ self.outData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/outFile_LF.in'
)
else:
- self.inpData_lf = os.path.join(work_dir, 'templatedir/inpFile_LF.in')
- self.outData_lf = os.path.join(work_dir, 'templatedir/outFile_LF.in')
+ self.inpData_lf = os.path.join(work_dir, 'templatedir/inpFile_LF.in') # noqa: PTH118
+ self.outData_lf = os.path.join(work_dir, 'templatedir/outFile_LF.in') # noqa: PTH118
self.X_lf = read_txt(self.inpData_lf, errlog)
self.Y_lf = read_txt(self.outData_lf, errlog)
if self.X_lf.shape[0] != self.Y_lf.shape[0]:
@@ -267,19 +267,19 @@ def __init__(
errlog.exit(msg)
if nugget_opt == 'Fixed Values':
- for Vals in self.nuggetVal:
+ for Vals in self.nuggetVal: # noqa: N806
if not np.isscalar(Vals):
msg = 'Error reading json: provide nugget values of each QoI with comma delimiter'
errlog.exit(msg)
elif nugget_opt == 'Fixed Bounds':
- for Bous in self.nuggetVal:
+ for Bous in self.nuggetVal: # noqa: N806
if np.isscalar(Bous):
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif isinstance(Bous, list):
msg = 'Error reading json: provide both lower and upper bounds of nugget'
errlog.exit(msg)
- elif Bous.shape[0] != 2:
+ elif Bous.shape[0] != 2: # noqa: PLR2004
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif Bous[0] > Bous[1]:
@@ -313,7 +313,7 @@ def __init__(
#
if do_sampling:
- thr_NRMSE = surrogateInfo['accuracyLimit']
+ thr_NRMSE = surrogateInfo['accuracyLimit'] # noqa: N806
thr_t = surrogateInfo['timeLimit'] * 60
np.random.seed(surrogateInfo['seed'])
@@ -337,8 +337,8 @@ def __init__(
#
if self.use_existing:
- X_tmp = read_txt(self.inpData, errlog)
- Y_tmp = read_txt(self.outData, errlog)
+ X_tmp = read_txt(self.inpData, errlog) # noqa: N806
+ Y_tmp = read_txt(self.outData, errlog) # noqa: N806
n_ex = X_tmp.shape[0]
if self.do_mf:
@@ -368,8 +368,8 @@ def __init__(
# msg = 'Error reading json: # of initial DoE should be greater than 0'
# errlog.exit(msg)
user_init = -1
- X_tmp = np.zeros((0, x_dim))
- Y_tmp = np.zeros((0, y_dim))
+ X_tmp = np.zeros((0, x_dim)) # noqa: N806
+ Y_tmp = np.zeros((0, y_dim)) # noqa: N806
if user_init < 0:
n_init_ref = min(4 * x_dim, thr_count + n_ex - 1, 500)
@@ -387,7 +387,7 @@ def __init__(
n_iter = thr_count - n_init
- def FEM_batch(Xs, id_sim):
+ def FEM_batch(Xs, id_sim): # noqa: N802, N803
return run_FEM_batch(
Xs,
id_sim,
@@ -405,14 +405,14 @@ def FEM_batch(Xs, id_sim):
# check validity of datafile
if n_ex > 0:
# Y_test, self.id_sim = FEM_batch(X_tmp[0, :][np.newaxis], self.id_sim)
- # TODO : Fix this
- print(X_tmp[0, :][np.newaxis].shape)
- X_test, Y_test, self.id_sim = FEM_batch(
+ # TODO : Fix this # noqa: TD002
+ print(X_tmp[0, :][np.newaxis].shape) # noqa: T201
+ X_test, Y_test, self.id_sim = FEM_batch( # noqa: N806
X_tmp[0, :][np.newaxis], self.id_sim
)
if (
np.sum(
- abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01,
+ abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01, # noqa: PLR2004
axis=1,
)
> 0
@@ -429,17 +429,17 @@ def FEM_batch(Xs, id_sim):
#
if n_init > 0:
- U = lhs(x_dim, samples=(n_init))
- X = np.vstack([X_tmp, np.zeros((n_init, x_dim))])
+ U = lhs(x_dim, samples=(n_init)) # noqa: N806
+ X = np.vstack([X_tmp, np.zeros((n_init, x_dim))]) # noqa: N806
for nx in range(x_dim):
X[n_ex : n_ex + n_init, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
+ self.xrange[nx, 0]
)
else:
- X = X_tmp
+ X = X_tmp # noqa: N806
- if sum(abs(self.len / self.xrange[:, 0]) < 1.0e-7) > 1:
+ if sum(abs(self.len / self.xrange[:, 0]) < 1.0e-7) > 1: # noqa: PLR2004
msg = 'Error : upperbound and lowerbound should not be the same'
errlog.exit(msg)
@@ -447,14 +447,14 @@ def FEM_batch(Xs, id_sim):
else:
n_ex = 0
- thr_NRMSE = 0.02 # default
+ thr_NRMSE = 0.02 # default # noqa: N806
thr_t = float('inf')
#
# Read sample locations from directory
#
- X = read_txt(self.inpData, errlog)
+ X = read_txt(self.inpData, errlog) # noqa: N806
if self.do_mf:
if X.shape[1] != self.X_hf.shape[1]:
@@ -474,7 +474,7 @@ def FEM_batch(Xs, id_sim):
# give error
- if thr_count <= 2:
+ if thr_count <= 2: # noqa: PLR2004
msg = 'Number of samples should be greater than 2.'
errlog.exit(msg)
@@ -501,21 +501,21 @@ def FEM_batch(Xs, id_sim):
#
# SimCenter workflow setting
#
- if os.path.exists(f'{work_dir}/workdir.1'):
+ if os.path.exists(f'{work_dir}/workdir.1'): # noqa: PTH110
is_left = True
idx = 0
def change_permissions_recursive(path, mode):
- for root, dirs, files in os.walk(path, topdown=False):
- for dir in [os.path.join(root, d) for d in dirs]:
- os.chmod(dir, mode)
- for file in [os.path.join(root, f) for f in files]:
- os.chmod(file, mode)
+ for root, dirs, files in os.walk(path, topdown=False): # noqa: B007
+ for dir in [os.path.join(root, d) for d in dirs]: # noqa: A001, PTH118
+ os.chmod(dir, mode) # noqa: PTH101
+ for file in [os.path.join(root, f) for f in files]: # noqa: PTH118
+ os.chmod(file, mode) # noqa: PTH101
while is_left:
idx = idx + 1
try:
- if os.path.exists(
+ if os.path.exists( # noqa: PTH110
f'{work_dir}/workdir.{idx}/{workflowDriver}'
):
# os.chmod('{}/workdir.{}'.format(work_dir, idx), 777)
@@ -523,33 +523,33 @@ def change_permissions_recursive(path, mode):
f'{work_dir}/workdir.{idx}', 0o777
)
my_dir = f'{work_dir}/workdir.{idx}'
- os.chmod(my_dir, 0o777)
+ os.chmod(my_dir, 0o777) # noqa: S103, PTH101
shutil.rmtree(my_dir)
# shutil.rmtree('{}/workdir.{}'.format(work_dir, idx), ignore_errors=False, onerror=handleRemoveReadonly)
- except Exception as ex:
- print(ex)
+ except Exception as ex: # noqa: BLE001
+ print(ex) # noqa: T201
is_left = True
break
- print('Cleaned the working directory')
+ print('Cleaned the working directory') # noqa: T201
else:
- print('Work directory is clean')
+ print('Work directory is clean') # noqa: T201
- if os.path.exists(f'{work_dir}/dakotaTab.out'):
- os.remove(f'{work_dir}/dakotaTab.out')
+ if os.path.exists(f'{work_dir}/dakotaTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/dakotaTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/inputTab.out'):
- os.remove(f'{work_dir}/inputTab.out')
+ if os.path.exists(f'{work_dir}/inputTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/inputTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/outputTab.out'):
- os.remove(f'{work_dir}/outputTab.out')
+ if os.path.exists(f'{work_dir}/outputTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/outputTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/SimGpModel.pkl'):
- os.remove(f'{work_dir}/SimGpModel.pkl')
+ if os.path.exists(f'{work_dir}/SimGpModel.pkl'): # noqa: PTH110
+ os.remove(f'{work_dir}/SimGpModel.pkl') # noqa: PTH107
- if os.path.exists(f'{work_dir}/verif.out'):
- os.remove(f'{work_dir}/verif.out')
+ if os.path.exists(f'{work_dir}/verif.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/verif.out') # noqa: PTH107
# func = self.__run_FEM(X,self.id_sim, self.rv_name)
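
The five exists-then-remove pairs above collapse under pathlib, which is what PTH110/PTH107 are nudging toward. A sketch, assuming Python 3.8+ for `missing_ok`:

    from pathlib import Path

    for name in ('dakotaTab.out', 'inputTab.out', 'outputTab.out',
                 'SimGpModel.pkl', 'verif.out'):
        Path(work_dir, name).unlink(missing_ok=True)
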
@@ -558,9 +558,9 @@ def change_permissions_recursive(path, mode):
#
t_tmp = time.time()
- X_fem, Y_fem, self.id_sim = FEM_batch(X[n_ex:, :], self.id_sim)
- Y = np.vstack((Y_tmp, Y_fem))
- X = np.vstack((X[0:n_ex, :], X_fem))
+ X_fem, Y_fem, self.id_sim = FEM_batch(X[n_ex:, :], self.id_sim) # noqa: N806
+ Y = np.vstack((Y_tmp, Y_fem)) # noqa: N806
+ X = np.vstack((X[0:n_ex, :], X_fem)) # noqa: N806
t_sim_all = time.time() - t_tmp
@@ -574,8 +574,8 @@ def change_permissions_recursive(path, mode):
#
if self.do_predictive:
n_pred = 100
- Xt = np.zeros((n_pred, x_dim))
- U = lhs(x_dim, samples=n_pred)
+ Xt = np.zeros((n_pred, x_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_pred) # noqa: N806
for nx in range(x_dim):
Xt[:, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
@@ -586,14 +586,14 @@ def change_permissions_recursive(path, mode):
# for ns in range(n_pred):
# Yt[ns, :],self.id_sim = run_FEM(Xt[ns, :][np.newaxis],self.id_sim, self.rv_name)
- Yt = np.zeros((n_pred, y_dim))
- Xt, Yt, self.id_sim = FEM_batch(Xt, self.id_sim)
+ Yt = np.zeros((n_pred, y_dim)) # noqa: N806
+ Xt, Yt, self.id_sim = FEM_batch(Xt, self.id_sim) # noqa: N806
else:
#
# READ SAMPLES FROM DIRECTORY
#
- Y = read_txt(self.outData, errlog)
+ Y = read_txt(self.outData, errlog) # noqa: N806
if self.do_mf:
if Y.shape[1] != self.Y_hf.shape[1]:
@@ -628,9 +628,9 @@ def change_permissions_recursive(path, mode):
if not self.do_mf:
kg = kr
- self.m_list = list()
+ self.m_list = list() # noqa: C408
for i in range(y_dim):
- self.m_list = self.m_list + [
+ self.m_list = self.m_list + [ # noqa: RUF005
GPy.models.GPRegression(
X,
Y[:, i][np.newaxis].transpose(),
@@ -640,7 +640,7 @@ def change_permissions_recursive(path, mode):
]
for parname in self.m_list[i].parameter_names():
if parname.endswith('lengthscale'):
- exec('self.m_list[i].' + parname + '=self.len')
+ exec('self.m_list[i].' + parname + '=self.len') # noqa: S102
else:
kgs = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()])
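
S102 flags the exec() used to seed the lengthscales. GPy models support dictionary-style parameter assignment by name, which would avoid building code from strings; a sketch, assuming GPy's string-keyed parameter indexing applies to these models:

    for parname in self.m_list[i].parameter_names():
        if parname.endswith('lengthscale'):
            self.m_list[i][parname] = self.len  # no exec needed
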
@@ -655,22 +655,22 @@ def change_permissions_recursive(path, mode):
msg = f'Error importing input data: dimension of low ({X.shape[1]}) and high ({self.X_hf.shape[1]}) fidelity models (datasets) are inconsistent'
errlog.exit(msg)
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list, Y_list = (
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ X_list, Y_list = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, self.X_hf], [Y, self.Y_hf]
)
)
elif self.mf_case == 'model-data':
- X_list, Y_list = (
+ X_list, Y_list = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X], [self.Y_lf, Y]
)
)
- self.m_list = list()
- for i in range(y_dim):
- self.m_list = self.m_list + [
+ self.m_list = list() # noqa: C408
+ for i in range(y_dim): # noqa: B007
+ self.m_list = self.m_list + [ # noqa: RUF005
GPyMultiOutputWrapper(
emf.models.GPyLinearMultiFidelityModel(
X_list, Y_list, kernel=kgs.copy(), n_fidelities=2
@@ -696,7 +696,7 @@ def change_permissions_recursive(path, mode):
break_doe = False
- print('======== RUNNING GP DoE ===========')
+ print('======== RUNNING GP DoE ===========') # noqa: T201
exit_code = 'count' # num iter
i = 0
x_new = np.zeros((0, x_dim))
@@ -705,9 +705,9 @@ def change_permissions_recursive(path, mode):
doe_off = False # false if true
while not doe_off:
- t = time.time()
+ t = time.time() # noqa: F841
if (
- self.doe_method == 'random'
+ self.doe_method == 'random' # noqa: PLR1714
or self.doe_method == 'pareto'
or np.mod(i, self.cal_interval) == 0
):
@@ -716,7 +716,7 @@ def change_permissions_recursive(path, mode):
do_cal = False
t_tmp = time.time()
- [x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = (
+ [x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = ( # noqa: N806
self.__design_of_experiments(
X,
Y,
@@ -732,33 +732,33 @@ def change_permissions_recursive(path, mode):
)
t_doe = time.time() - t_tmp
- print(f'DoE Time: {t_doe:.2f} s')
+ print(f'DoE Time: {t_doe:.2f} s') # noqa: T201
if automate_doe:
if t_doe > self.t_sim_each:
break_doe = True
- print('========>> DOE OFF')
+ print('========>> DOE OFF') # noqa: T201
n_left = n_iter - i
break
if not self.do_mf:
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y) # noqa: N806
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf) # noqa: N806
elif self.mf_case == 'model-data':
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y) # noqa: N806
self.NRMSE_hist = np.vstack((self.NRMSE_hist, np.array(NRMSE_val)))
self.NRMSE_idx = np.vstack((self.NRMSE_idx, i))
if self.do_predictive:
- Yt_pred = np.zeros((n_pred, y_dim))
+ Yt_pred = np.zeros((n_pred, y_dim)) # noqa: N806
for ny in range(y_dim):
y_pred_tmp, dummy = self.__predict(self.m_list[ny], Xt)
Yt_pred[:, ny] = y_pred_tmp.transpose()
if self.do_logtransform:
- Yt_pred = np.exp(Yt_pred)
- NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt)
+ Yt_pred = np.exp(Yt_pred) # noqa: N806
+ NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt) # noqa: N806
self.NRMSE_pred_hist = np.vstack(
(self.NRMSE_pred_hist, np.array(NRMSE_pred_val))
)
@@ -803,16 +803,16 @@ def change_permissions_recursive(path, mode):
x_new, y_new, self.id_sim = FEM_batch(x_new, self.id_sim)
# print(">> {:.2f} s".format(time.time() - t_init))
- X = np.vstack([X, x_new])
- Y = np.vstack([Y, y_new])
+ X = np.vstack([X, x_new]) # noqa: N806
+ Y = np.vstack([Y, y_new]) # noqa: N806
- print('======== RUNNING GP Calibration ===========')
+ print('======== RUNNING GP Calibration ===========') # noqa: T201
# not used
if break_doe:
- X_tmp = np.zeros((n_left, x_dim))
- Y_tmp = np.zeros((n_left, y_dim))
- U = lhs(x_dim, samples=n_left)
+ X_tmp = np.zeros((n_left, x_dim)) # noqa: N806
+ Y_tmp = np.zeros((n_left, y_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_left) # noqa: N806
for nx in range(x_dim):
# X[:,nx] = np.random.uniform(xrange[nx,0], xrange[nx,1], (1, n_init))
X_tmp[:, nx] = (
@@ -820,7 +820,7 @@ def change_permissions_recursive(path, mode):
+ self.xrange[nx, 0]
)
- X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp, self.id_sim)
+ X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp, self.id_sim) # noqa: N806
# for ns in np.arange(n_left):
# Y_tmp[ns, :],self.id_sim = run_FEM(X_tmp[ns, :][np.newaxis],self.id_sim, self.rv_name)
@@ -830,8 +830,8 @@ def change_permissions_recursive(path, mode):
# Y_tmp = Y_tmp[:ns, :]
# break
- X = np.vstack((X, X_tmp))
- Y = np.vstack((Y, Y_tmp))
+ X = np.vstack((X, X_tmp)) # noqa: N806
+ Y = np.vstack((Y, Y_tmp)) # noqa: N806
do_doe = False
# if not do_doe:
@@ -886,16 +886,16 @@ def change_permissions_recursive(path, mode):
# plt.show()
# plt.plot(Y_cv[:,1], Y[:,1], 'x')
# plt.show()
- print(f'my exit code = {exit_code}')
- print(f'1. count = {self.id_sim}')
- print(f'2. max(NRMSE) = {np.max(NRMSE_val)}')
- print(f'3. time = {sim_time:.2f} s')
+ print(f'my exit code = {exit_code}') # noqa: T201
+ print(f'1. count = {self.id_sim}') # noqa: T201
+ print(f'2. max(NRMSE) = {np.max(NRMSE_val)}') # noqa: T201
+ print(f'3. time = {sim_time:.2f} s') # noqa: T201
# for user information
if do_simulation:
n_err = 1000
- Xerr = np.zeros((n_err, x_dim))
- U = lhs(x_dim, samples=n_err)
+ Xerr = np.zeros((n_err, x_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_err) # noqa: N806
for nx in range(x_dim):
Xerr[:, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
@@ -936,7 +936,7 @@ def change_permissions_recursive(path, mode):
# exec('y_pred_prior_var[ns,ny]=m_tmp.' + parname)
# error_ratio1_Pr = (y_pred_var / y_pred_prior_var)
- error_ratio2_Pr = y_pred_var / y_data_var
+ error_ratio2_Pr = y_pred_var / y_data_var # noqa: N806
# np.max(error_ratio1_Pr, axis=0)
np.max(error_ratio2_Pr, axis=0)
@@ -948,14 +948,14 @@ def change_permissions_recursive(path, mode):
self.perc_thr = 1 - (self.perc_thr) * 0.001 # ratio=simulation/sampling
corr_val = np.zeros((y_dim,))
- R2_val = np.zeros((y_dim,))
+ R2_val = np.zeros((y_dim,)) # noqa: N806
for ny in range(y_dim):
if not self.do_mf:
- Y_ex = Y[:, ny]
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- Y_ex = self.Y_hf[:, ny]
+ Y_ex = Y[:, ny] # noqa: N806
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ Y_ex = self.Y_hf[:, ny] # noqa: N806
elif self.mf_case == 'model-data':
- Y_ex = Y[:, ny]
+ Y_ex = Y[:, ny] # noqa: N806
corr_val[ny] = np.corrcoef(Y_ex, Y_cv[:, ny])[0, 1]
R2_val[ny] = 1 - np.sum(pow(Y_cv[:, ny] - Y_ex, 2)) / np.sum(
@@ -996,24 +996,24 @@ def change_permissions_recursive(path, mode):
self.rvDist = []
self.rvVal = []
for nx in range(x_dim):
- rvInfo = inp['randomVariables'][nx]
- self.rvName = self.rvName + [rvInfo['name']]
- self.rvDist = self.rvDist + [rvInfo['distribution']]
+ rvInfo = inp['randomVariables'][nx] # noqa: N806
+ self.rvName = self.rvName + [rvInfo['name']] # noqa: RUF005
+ self.rvDist = self.rvDist + [rvInfo['distribution']] # noqa: RUF005
if do_sampling:
- self.rvVal = self.rvVal + [
+ self.rvVal = self.rvVal + [ # noqa: RUF005
(rvInfo['upperbound'] + rvInfo['lowerbound']) / 2
]
else:
- self.rvVal = self.rvVal + [np.mean(X[:, nx])]
+ self.rvVal = self.rvVal + [np.mean(X[:, nx])] # noqa: RUF005
- def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
+ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt): # noqa: ARG002, C901
warnings.filterwarnings('ignore')
t_opt = time.time()
- m_list = list()
+ m_list = list() # noqa: C408
for ny in range(self.y_dim):
- print(f'y dimension {ny}:')
+ print(f'y dimension {ny}:') # noqa: T201
nopt = 10
#
@@ -1052,7 +1052,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
m = m_tmp.copy()
id_opt = 1
- print(f'{1} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}')
+ print(f'{1} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}') # noqa: T201
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
@@ -1064,7 +1064,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- exec('m_tmp.' + parname + '=self.len')
+ exec('m_tmp.' + parname + '=self.len') # noqa: S102
if nugget_opt_tmp == 'Optimize':
m_tmp['Gaussian_noise.variance'].unfix()
@@ -1088,7 +1088,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
m = m_tmp.copy()
id_opt = 1
- print(f'{2} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}')
+ print(f'{2} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}') # noqa: T201
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
@@ -1100,14 +1100,14 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
if math.isnan(m.log_likelihood()):
- exec(
+ exec( # noqa: S102
'm_tmp.'
+ parname
+ '=np.random.exponential(1, (1, x_dim)) * m_init.'
+ parname
)
else:
- exec(
+ exec( # noqa: S102
'm_tmp.'
+ parname
+ '=np.random.exponential(1, (1, x_dim)) * m.'
@@ -1127,15 +1127,15 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
elif nugget_opt_tmp == 'Zero':
m_tmp['Gaussian_noise.variance'].constrain_fixed(0)
- t_fix = time.time()
+ t_fix = time.time() # noqa: F841
try:
m_tmp.optimize()
# m_tmp.optimize_restarts(5)
- except Exception as ex:
- print(f'OS error: {ex}')
+ except Exception as ex: # noqa: BLE001
+ print(f'OS error: {ex}') # noqa: T201
- print(
+ print( # noqa: T201
f'{no + 3} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}'
)
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_fix))
@@ -1155,8 +1155,8 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
msg = f'Error GP optimization failed for QoI #{ny + 1}'
self.errlog.exit(msg)
- m_list = m_list + [m]
- print(m)
+ m_list = m_list + [m] # noqa: RUF005
+ print(m) # noqa: T201
else:
if nugget_opt_tmp == 'Optimize':
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.unfix()
@@ -1203,16 +1203,16 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
id_opt = 0
self.calib_time = (time.time() - t_opt) * round(10 / nopt)
- print(f' Calibration time: {self.calib_time:.2f} s, id_opt={id_opt}')
+ print(f' Calibration time: {self.calib_time:.2f} s, id_opt={id_opt}') # noqa: T201
return m_tmp_list
- def __design_of_experiments(
+ def __design_of_experiments( # noqa: C901, PLR0915
self,
- X,
- Y,
+ X, # noqa: N803
+ Y, # noqa: N803
ac,
- ar,
+ ar, # noqa: ARG002
n_candi,
n_integ,
pre_m_list,
@@ -1224,19 +1224,19 @@ def __design_of_experiments(
if self.do_logtransform:
if np.min(Y) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
- errlog.exit(msg)
- Y = np.log(Y)
+ errlog.exit(msg) # noqa: F821
+ Y = np.log(Y) # noqa: N806
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
if np.min(self.Y_hf) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
- errlog.exit(msg)
+ errlog.exit(msg) # noqa: F821
self.Y_hf = np.log(self.Y_hf)
elif self.mf_case == 'mode-data':
if np.min(self.Y_lf) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
- errlog.exit(msg)
+ errlog.exit(msg) # noqa: F821
self.Y_lf = np.log(self.Y_lf)
r = 1 # adaptively
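
The F821 suppressions just above mark a live hazard rather than linter noise: `errlog` is not defined inside __design_of_experiments, so reaching any of these branches raises NameError before the message is shown; the constructor stores the logger as `self.errlog`, which looks like the intended reference. Note also the `'mode-data'` string on the low-fidelity branch: every other comparison in the file spells it `'model-data'`, so it reads like a typo. A method-body sketch of the apparent intent:

    if self.mf_case == 'model-data':  # 'mode-data' above looks like a typo
        if np.min(self.Y_lf) < 0:
            msg = ('Error running SimCenterUQ. Response contains negative values. '
                   'Please uncheck the log-transform option in the UQ tab')
            self.errlog.exit(msg)  # bare errlog is undefined in this scope (F821)
        self.Y_lf = np.log(self.Y_lf)
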
@@ -1250,8 +1250,8 @@ def __design_of_experiments(
if not self.do_mf:
m_tmp_list[i].set_XY(X, Y[:, i][np.newaxis].transpose())
else:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list_tmp, Y_list_tmp = (
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, self.X_hf],
[
@@ -1261,7 +1261,7 @@ def __design_of_experiments(
)
)
elif self.mf_case == 'model-data':
- X_list_tmp, Y_list_tmp = (
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X],
[
@@ -1281,21 +1281,21 @@ def __design_of_experiments(
#
# cross validation errors
#
- Y_pred, Y_pred_var, e2 = self.__get_cross_validation(X, Y, m_list)
+ Y_pred, Y_pred_var, e2 = self.__get_cross_validation(X, Y, m_list) # noqa: N806
if self.do_logtransform:
mu = Y_pred
sig2 = Y_pred_var
median = np.exp(mu)
- mean = np.exp(mu + sig2 / 2)
+ mean = np.exp(mu + sig2 / 2) # noqa: F841
var = np.exp(2 * mu + sig2) * (np.exp(sig2) - 1)
- Y_pred = median
- Y_pred_var = var
+ Y_pred = median # noqa: N806
+ Y_pred_var = var # noqa: N806
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
self.Y_hf = np.exp(self.Y_hf)
elif self.mf_case == 'model-data':
self.Y_lf = np.exp(self.Y_lf)
@@ -1340,17 +1340,17 @@ def __design_of_experiments(
#
yc1_pred, yc1_var = self.__predict(m_idx, xc1) # use only variance
- score1 = np.zeros(yc1_pred.shape)
+ score1 = np.zeros(yc1_pred.shape) # noqa: F841
cri1 = np.zeros(yc1_pred.shape)
cri2 = np.zeros(yc1_pred.shape)
- # TODO: is this the best?
+ # TODO: is this the best? # noqa: TD002
ll = self.xrange[:, 1] - self.xrange[:, 0]
for i in range(nc1):
if not self.do_mf:
wei = self.weights_node2(xc1[i, :], X, ll)
# phi = e2[closest_node(xc1[i, :], X, ll)]
# phi = e2[self.__closest_node(xc1[i, :], X)]
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
wei = self.weights_node2(xc1[i, :], self.X_hf, ll)
# phi = e2[closest_node(xc1[i, :], self.X_hf, ll)]
# phi = e2[self.__closest_node(xc1[i, :], self.X_hf)]
@@ -1363,7 +1363,7 @@ def __design_of_experiments(
cri2[i] = sum(e2[:, y_idx] / Y_pred_var[:, y_idx] * wei.T)
# cri2[i] = pow(phi[y_idx],r)
- VOI = np.zeros(yc1_pred.shape)
+ VOI = np.zeros(yc1_pred.shape) # noqa: N806
for i in range(nc1):
pdfvals = (
m_idx.kern.K(np.array([xq[i]]), xq) ** 2
@@ -1380,11 +1380,11 @@ def __design_of_experiments(
logcrimi1 = np.log(cri1[:, 0])
logcrimi2 = np.log(cri2[:, 0])
- idx_pareto_front = list()
+ idx_pareto_front = list() # noqa: C408, F841
rankid = np.zeros(nc1)
- varRank = np.zeros(nc1)
- biasRank = np.zeros(nc1)
- for id in range(nc1):
+ varRank = np.zeros(nc1) # noqa: N806
+ biasRank = np.zeros(nc1) # noqa: N806
+ for id in range(nc1): # noqa: A001
idx_tmp = np.argwhere(
(logcrimi1 >= logcrimi1[id]) * (logcrimi2 >= logcrimi2[id])
)
@@ -1392,11 +1392,11 @@ def __design_of_experiments(
biasRank[id] = np.sum(logcrimi2 >= logcrimi2[id])
rankid[id] = idx_tmp.size
- idx_rank = np.argsort(rankid)
- sort_rank = np.sort(rankid)
+ idx_rank = np.argsort(rankid) # noqa: F841
+ sort_rank = np.sort(rankid) # noqa: F841
num_1rank = np.sum(rankid == 1)
idx_1rank = list((np.argwhere(rankid == 1)).flatten())
- npareto = 4
+ npareto = 4 # noqa: F841
if num_1rank < self.cal_interval:
prob = np.ones((nc1,))
@@ -1407,8 +1407,8 @@ def __design_of_experiments(
)
else:
idx_pareto_candi = idx_1rank.copy()
- X_tmp = X
- Y_tmp = Y[:, y_idx][np.newaxis].T
+ X_tmp = X # noqa: N806
+ Y_tmp = Y[:, y_idx][np.newaxis].T # noqa: N806
m_tmp = m_idx.copy()
# get MMSEw
@@ -1420,12 +1420,12 @@ def __design_of_experiments(
idx_pareto_new = [best_global]
del idx_pareto_candi[best_local]
- for i in range(self.cal_interval - 1):
- X_tmp = np.vstack([X_tmp, xc1[best_global, :][np.newaxis]])
+ for i in range(self.cal_interval - 1): # noqa: B007
+ X_tmp = np.vstack([X_tmp, xc1[best_global, :][np.newaxis]]) # noqa: N806
# any variables
- Y_tmp = np.vstack([Y_tmp, np.array([[0]])])
+ Y_tmp = np.vstack([Y_tmp, np.array([[0]])]) # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
- dummy, Yq_var = m_tmp.predict(xc1[idx_pareto_candi, :])
+ dummy, Yq_var = m_tmp.predict(xc1[idx_pareto_candi, :]) # noqa: N806
cri1 = Yq_var * VOI[idx_pareto_candi]
cri1 = (cri1 - np.min(cri1)) / (np.max(cri1) - np.min(cri1))
score_tmp = (
@@ -1434,7 +1434,7 @@ def __design_of_experiments(
best_local = np.argsort(-np.squeeze(score_tmp))[0]
best_global = idx_pareto_candi[best_local]
- idx_pareto_new = idx_pareto_new + [best_global]
+ idx_pareto_new = idx_pareto_new + [best_global] # noqa: RUF005
del idx_pareto_candi[best_local]
# score_tmp = Yq_var * cri2[idx_pareto_left]/Y_pred_var[closest_node(xc1[i, :], X, self.m_list, self.xrange)]
@@ -1443,7 +1443,7 @@ def __design_of_experiments(
idx_pareto = idx_pareto_new
update_point = xc1[idx_pareto, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
# import matplotlib.pyplot as plt
# plt.plot(logcrimi1, logcrimi2, 'x');plt.plot(logcrimi1[idx_pareto], logcrimi2[idx_pareto], 'x'); plt.show()
@@ -1468,16 +1468,16 @@ def __design_of_experiments(
idx_pareto2 = np.asarray(random_indices)
idx_pareto = np.asarray(idx_pareto)
idx_pareto = list(idx_pareto[idx_pareto2[0:self.cal_interval]])
- """
+ """ # noqa: W293
elif self.doe_method == 'imsew':
nq = round(n_integ)
m_stack = m_idx.copy()
- X_stack = X
- Y_stack = Y
+ X_stack = X # noqa: N806
+ Y_stack = Y # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
- update_IMSE = np.zeros((self.cal_interval, 1))
+ update_IMSE = np.zeros((self.cal_interval, 1)) # noqa: N806
#
# Initial candidates
@@ -1499,7 +1499,7 @@ def __design_of_experiments(
self.xrange[nx, 0], self.xrange[nx, 1], (1, nq)
)
- # TODO: is diff(xrange) the best?
+ # TODO: is diff(xrange) the best? # noqa: TD002
ll = self.xrange[:, 1] - self.xrange[:, 0]
phiq = np.zeros((nq, y_dim))
for i in range(nq):
@@ -1514,29 +1514,29 @@ def __design_of_experiments(
for i in range(nc1)
)
result_objs = list(self.pool.starmap(imse, iterables))
- IMSEc1 = np.zeros(nc1)
- for IMSE_val, idx in result_objs:
+ IMSEc1 = np.zeros(nc1) # noqa: N806
+ for IMSE_val, idx in result_objs: # noqa: N806
IMSEc1[idx] = IMSE_val
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} in a parallel way.. time = {time.time() - tmp}'
) # 7s # 3-4s
else:
tmp = time.time()
phiqr = pow(phiq[:, y_idx], r)
- IMSEc1 = np.zeros(nc1)
+ IMSEc1 = np.zeros(nc1) # noqa: N806
for i in range(nc1):
IMSEc1[i], dummy = imse(
m_stack.copy(), xc1[i, :][np.newaxis], xq, phiqr, i
)
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} in a serial way.. time = {time.time() - tmp}'
) # 4s
new_idx = np.argmin(IMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
- Y_stack = np.zeros(
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
+ Y_stack = np.zeros( # noqa: N806
(Y_stack.shape[0] + 1, Y.shape[1])
) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
@@ -1627,11 +1627,11 @@ def __design_of_experiments(
update_point = xc3[new_idx, :][np.newaxis]
update_IMSE = IMSE[new_idx]
- """
+ """ # noqa: W293
elif self.doe_method == 'random':
update_point = xc1[0 : self.cal_interval, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
elif self.doe_method == 'mmse':
sort_idx_score1 = np.argsort(
@@ -1641,7 +1641,7 @@ def __design_of_experiments(
xc2 = xc1[sort_idx_score1[0, 0:nc2], :]
update_point = xc2[0:1, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
elif self.doe_method == 'mmsew':
#
@@ -1661,21 +1661,21 @@ def __design_of_experiments(
phicr = pow(phic[:, y_idx], r)
- X_stack = X
- Y_stack = Y
+ X_stack = X # noqa: N806
+ Y_stack = Y # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
- update_IMSE = np.zeros((self.cal_interval, 1))
+ update_IMSE = np.zeros((self.cal_interval, 1)) # noqa: N806
for ni in range(self.cal_interval):
yc1_pred, yc1_var = m_stack.predict(xc1) # use only variance
- MMSEc1 = yc1_var.flatten() * phicr.flatten()
+ MMSEc1 = yc1_var.flatten() * phicr.flatten() # noqa: N806
new_idx = np.argmax(MMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
- Y_stack = np.zeros(
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
+ Y_stack = np.zeros( # noqa: N806
(Y_stack.shape[0] + 1, Y.shape[1])
) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
@@ -1688,15 +1688,15 @@ def __design_of_experiments(
+ self.doe_method
+ '>'
)
- errlog.exit(msg)
+ errlog.exit(msg) # noqa: F821
return update_point, m_list, update_IMSE, y_idx, Y_pred, Y_pred_var
def __normalized_mean_sq_error(self, yp, ye):
nt = yp.shape[0]
data_bound = np.max(ye, axis=0) - np.min(ye, axis=0)
- RMSE = np.sqrt(1 / nt * np.sum(pow(yp - ye, 2), axis=0))
- NRMSE = RMSE / data_bound
+ RMSE = np.sqrt(1 / nt * np.sum(pow(yp - ye, 2), axis=0)) # noqa: N806
+ NRMSE = RMSE / data_bound # noqa: N806
NRMSE[np.argwhere(data_bound == 0)] = 0
return NRMSE
@@ -1714,14 +1714,14 @@ def __closest_node(self, node, nodes):
dist_2 = np.einsum('ij,ij->i', deltas_norm, deltas_norm)
return np.argmin(dist_2)
- def __from_XY_into_list(self, X, Y):
- x_list = list()
- y_list = list()
+ def __from_XY_into_list(self, X, Y): # noqa: N802, N803
+ x_list = list() # noqa: C408
+ y_list = list() # noqa: C408
for i in range(Y.shape[1]):
- x_list = x_list + [
+ x_list = x_list + [ # noqa: RUF005
X,
]
- y_list = y_list + [
+ y_list = y_list + [ # noqa: RUF005
Y[
:,
[
@@ -1731,52 +1731,52 @@ def __from_XY_into_list(self, X, Y):
]
return x_list, y_list
- def __predict(self, m, X):
- if not self.do_mf:
+ def __predict(self, m, X): # noqa: N803
+ if not self.do_mf: # noqa: RET503
return m.predict(X)
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list = convert_x_list_to_array([X, X])
- X_list_l = X_list[: X.shape[0]]
- X_list_h = X_list[X.shape[0] :]
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: RET505, PLR1714
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806
+ X_list_l = X_list[: X.shape[0]] # noqa: N806
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
elif self.mf_case == 'model-data':
# return m.predict(X)
- X_list = convert_x_list_to_array([X, X])
- X_list_l = X_list[: X.shape[0]]
- X_list_h = X_list[X.shape[0] :]
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806
+ X_list_l = X_list[: X.shape[0]] # noqa: N806, F841
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
- def __get_cross_validation(self, X, Y, m_list):
+ def __get_cross_validation(self, X, Y, m_list): # noqa: N803
if not self.do_mf:
e2 = np.zeros(Y.shape)
- Y_pred = np.zeros(Y.shape)
- Y_pred_var = np.zeros(Y.shape)
+ Y_pred = np.zeros(Y.shape) # noqa: N806
+ Y_pred_var = np.zeros(Y.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = m_list[ny].copy()
for ns in range(X.shape[0]):
- X_tmp = np.delete(X, ns, axis=0)
- Y_tmp = np.delete(Y, ns, axis=0)
+ X_tmp = np.delete(X, ns, axis=0) # noqa: N806
+ Y_tmp = np.delete(Y, ns, axis=0) # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp[:, ny][np.newaxis].transpose())
x_loo = X[ns, :][np.newaxis]
# Y_pred_tmp, Y_err_tmp = m_tmp.predict(x_loo)
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
(Y_pred[ns, ny] - Y[ns, ny]), 2
) # for nD outputs
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
e2 = np.zeros(self.Y_hf.shape)
- Y_pred = np.zeros(self.Y_hf.shape)
- Y_pred_var = np.zeros(self.Y_hf.shape)
+ Y_pred = np.zeros(self.Y_hf.shape) # noqa: N806
+ Y_pred_var = np.zeros(self.Y_hf.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(self.X_hf.shape[0]):
- X_hf_tmp = np.delete(self.X_hf, ns, axis=0)
- Y_hf_tmp = np.delete(self.Y_hf, ns, axis=0)
- X_list_tmp, Y_list_tmp = (
+ X_hf_tmp = np.delete(self.X_hf, ns, axis=0) # noqa: N806
+ Y_hf_tmp = np.delete(self.Y_hf, ns, axis=0) # noqa: N806
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, X_hf_tmp],
[
@@ -1787,7 +1787,7 @@ def __get_cross_validation(self, X, Y, m_list):
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
x_loo = self.X_hf[ns][np.newaxis]
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
@@ -1796,15 +1796,15 @@ def __get_cross_validation(self, X, Y, m_list):
elif self.mf_case == 'model-data':
e2 = np.zeros(Y.shape)
- Y_pred = np.zeros(Y.shape)
- Y_pred_var = np.zeros(Y.shape)
+ Y_pred = np.zeros(Y.shape) # noqa: N806
+ Y_pred_var = np.zeros(Y.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(X.shape[0]):
- X_tmp = np.delete(X, ns, axis=0)
- Y_tmp = np.delete(Y, ns, axis=0)
- X_list_tmp, Y_list_tmp = (
+ X_tmp = np.delete(X, ns, axis=0) # noqa: N806
+ Y_tmp = np.delete(Y, ns, axis=0) # noqa: N806
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X_tmp],
[
@@ -1816,7 +1816,7 @@ def __get_cross_validation(self, X, Y, m_list):
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
# x_loo = np.hstack((X[ns], 1))[np.newaxis]
x_loo = self.X_hf[ns][np.newaxis]
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
@@ -1825,16 +1825,16 @@ def __get_cross_validation(self, X, Y, m_list):
return Y_pred, Y_pred_var, e2
- def term(self):
+ def term(self): # noqa: D102
if self.do_parallel:
if self.run_type != 'runningLocal':
- print('RUNNING SUCCESSFUL')
+ print('RUNNING SUCCESSFUL') # noqa: T201
self.world.Abort(0) # to prevent deadlock
- def save_model(self, filename):
+ def save_model(self, filename): # noqa: C901, D102, PLR0915
import json
- with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file:
+ with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file: # noqa: PTH123
pickle.dump(self.m_list, file)
# json.dump(self.m_list, file)
@@ -1964,7 +1964,7 @@ def save_model(self, filename):
for ny in range(self.y_dim):
if not self.do_mf:
results['yExact'][self.g_name[ny]] = self.Y[:, ny].tolist()
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
results['yExact'][self.g_name[ny]] = self.Y_hf[:, ny].tolist()
elif self.mf_case == 'model-data':
results['yExact'][self.g_name[ny]] = self.Y[:, ny].tolist()
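
PLR1714 appears on every `mf_case` branch in the file; a membership test states the condition once and reads closer to the intent ("either data-driven case"). A sketch of this branch:

    if self.mf_case in ('data-model', 'data-data'):
        results['yExact'][self.g_name[ny]] = self.Y_hf[:, ny].tolist()
    elif self.mf_case == 'model-data':
        results['yExact'][self.g_name[ny]] = self.Y[:, ny].tolist()
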
@@ -2037,7 +2037,7 @@ def save_model(self, filename):
results['outData'] = self.outData
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
results['inpData_HF'] = self.inpData_hf
results['outData_HF'] = self.outData_hf
results['valSamp_HF'] = self.X_hf.shape[0]
@@ -2053,7 +2053,7 @@ def save_model(self, filename):
rvs['name'] = self.rvName[nx]
rvs['distribution'] = self.rvDist[nx]
rvs['value'] = self.rvVal[nx]
- rv_list = rv_list + [rvs]
+ rv_list = rv_list + [rvs] # noqa: RUF005
results['randomVariables'] = rv_list
# Used for surrogate
@@ -2064,13 +2064,13 @@ def save_model(self, filename):
results['modelInfo'][self.g_name[ny]] = {}
for parname in self.m_list[ny].parameter_names():
results['modelInfo'][self.g_name[ny]][parname] = list(
- eval('self.m_list[ny].' + parname)
+ eval('self.m_list[ny].' + parname) # noqa: S307
)
- with open(self.work_dir + '/dakota.out', 'w') as fp:
+ with open(self.work_dir + '/dakota.out', 'w') as fp: # noqa: PTH123
json.dump(results, fp, indent=1)
- with open(self.work_dir + '/GPresults.out', 'w') as file:
+ with open(self.work_dir + '/GPresults.out', 'w') as file: # noqa: PTH123
file.write('* Problem setting\n')
file.write(f' - dimension of x : {self.x_dim}\n')
file.write(f' - dimension of y : {self.y_dim}\n')
@@ -2120,7 +2120,7 @@ def save_model(self, filename):
m_tmp = self.m_list[ny]
for parname in m_tmp.parameter_names():
file.write(f' - {parname} ')
- parvals = eval('m_tmp.' + parname)
+ parvals = eval('m_tmp.' + parname) # noqa: S307
if len(parvals) == self.x_dim:
file.write('\n')
for nx in range(self.x_dim):
@@ -2133,10 +2133,10 @@ def save_model(self, filename):
file.close()
- print('Results Saved')
+ print('Results Saved') # noqa: T201
return 0
- def weights_node2(self, node, nodes, ls):
+ def weights_node2(self, node, nodes, ls): # noqa: D102
nodes = np.asarray(nodes)
deltas = nodes - node
@@ -2152,29 +2152,29 @@ def weights_node2(self, node, nodes, ls):
return weig / sum(weig)
-def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
- X = np.atleast_2d(X)
+def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver): # noqa: N802, N803, D103
+ X = np.atleast_2d(X) # noqa: N806
x_dim = X.shape[1]
if X.shape[0] > 1:
- errlog = errorLog(work_dir)
+ errlog = errorLog(work_dir) # noqa: F821
msg = 'do one simulation at a time'
errlog.exit(msg)
# (1) create "workdir.idx " folder :need C++17 to use the files system namespace
current_dir_i = work_dir + '/workdir.' + str(id_sim + 1)
- print(id_sim)
+ print(id_sim) # noqa: T201
try:
shutil.copytree(work_dir + '/templatedir', current_dir_i)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
errlog = errorLog_in_pool(work_dir)
msg = 'Error running FEM: ' + str(ex)
errlog.exit(msg)
# (2) write param.in file
- outF = open(current_dir_i + '/params.in', 'w')
+ outF = open(current_dir_i + '/params.in', 'w') # noqa: SIM115, PTH123, N806
outF.write(f'{x_dim}\n')
for i in range(x_dim):
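The unmanaged `open()` above is what SIM115 and PTH123 object to. A pathlib-based sketch that closes the file deterministically (hypothetical paths; the write loop itself is unchanged):

```python
from pathlib import Path

current_dir_i = Path('/tmp/workdir.1')  # hypothetical working directory
current_dir_i.mkdir(parents=True, exist_ok=True)
x_dim = 3

with (current_dir_i / 'params.in').open('w') as out_f:
    out_f.write(f'{x_dim}\n')
```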
@@ -2185,10 +2185,10 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
os.chdir(current_dir_i)
workflow_run_command = f'{current_dir_i}/{workflowDriver}'
- subprocess.check_call(workflow_run_command, shell=True)
+ subprocess.check_call(workflow_run_command, shell=True) # noqa: S602
# (4) read the results
- if glob.glob('results.out'):
+ if glob.glob('results.out'): # noqa: PTH207
g = np.loadtxt('results.out').flatten()
else:
errlog = errorLog_in_pool(work_dir)
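S602 marks `shell=True` combined with a dynamically built command string. When the driver file is directly executable, passing an argument list with the default `shell=False` avoids both the warning and shell-quoting pitfalls; a sketch with a hypothetical path:

```python
import subprocess

workflow_run_command = ['/tmp/workdir.1/workflow_driver']  # hypothetical driver
subprocess.check_call(workflow_run_command)  # shell=False by default
```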
@@ -2210,20 +2210,20 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
return g, id_sim
-def run_FEM_batch(
- X,
+def run_FEM_batch( # noqa: N802, D103
+ X, # noqa: N803
id_sim,
rv_name,
do_parallel,
y_dim,
- os_type,
- run_type,
+ os_type, # noqa: ARG001
+ run_type, # noqa: ARG001
pool,
t_init,
t_thr,
- workflowDriver,
+ workflowDriver, # noqa: N803
):
- X = np.atleast_2d(X)
+ X = np.atleast_2d(X) # noqa: N806
# Windows
# if os_type.lower().startswith('win'):
# workflowDriver = "workflow_driver.bat"
@@ -2232,20 +2232,20 @@ def run_FEM_batch(
nsamp = X.shape[0]
if not do_parallel:
- Y = np.zeros((nsamp, y_dim))
+ Y = np.zeros((nsamp, y_dim)) # noqa: N806
for ns in range(nsamp):
Y[ns, :], id_sim_current = run_FEM(
X[ns, :], id_sim + ns, rv_name, work_dir, workflowDriver
)
if time.time() - t_init > t_thr:
- X = X[:ns, :]
- Y = Y[:ns, :]
+ X = X[:ns, :] # noqa: N806
+ Y = Y[:ns, :] # noqa: N806
break
return X, Y, id_sim_current + 1
if do_parallel:
- print(f'Running {nsamp} simulations in parallel')
+ print(f'Running {nsamp} simulations in parallel') # noqa: T201
tmp = time.time()
iterables = (
(X[i, :][np.newaxis], id_sim + i, rv_name, work_dir, workflowDriver)
@@ -2253,67 +2253,67 @@ def run_FEM_batch(
)
try:
result_objs = list(pool.starmap(run_FEM, iterables))
- print(f'Simulation time = {time.time() - tmp} s')
+ print(f'Simulation time = {time.time() - tmp} s') # noqa: T201
tmp = time.time()
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
pool.shutdown()
- except Exception:
+ except Exception: # noqa: BLE001
sys.exit()
tmp = time.time()
- print('=====================================')
- Nsim = len(list(result_objs))
- Y = np.zeros((Nsim, y_dim))
+ print('=====================================') # noqa: T201
+ Nsim = len(list(result_objs)) # noqa: N806
+ Y = np.zeros((Nsim, y_dim)) # noqa: N806
- for val, id in result_objs:
+ for val, id in result_objs: # noqa: A001
if np.isnan(np.sum(val)):
- Nsim = id - id_sim
- X = X[:Nsim, :]
- Y = Y[:Nsim, :]
+ Nsim = id - id_sim # noqa: N806
+ X = X[:Nsim, :] # noqa: N806
+ Y = Y[:Nsim, :] # noqa: N806
else:
Y[id - id_sim, :] = val
return X, Y, id_sim + Nsim
-def read_txt(text_dir, errlog):
- if not os.path.exists(text_dir):
+def read_txt(text_dir, errlog): # noqa: D103
+ if not os.path.exists(text_dir): # noqa: PTH110
msg = 'Error: file does not exist: ' + text_dir
errlog.exit(msg)
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123
# Iterate through the file until the table starts
header_count = 0
for line in f:
if line.startswith('%'):
header_count = header_count + 1
- print(line)
+ print(line) # noqa: T201
# X = np.loadtxt(f, skiprows=header_count, delimiter=',')
try:
- with open(text_dir) as f:
- X = np.loadtxt(f, skiprows=header_count)
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
+ X = np.loadtxt(f, skiprows=header_count) # noqa: N806
except ValueError:
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
try:
- X = np.genfromtxt(f, skip_header=header_count, delimiter=',')
+ X = np.genfromtxt(f, skip_header=header_count, delimiter=',') # noqa: N806
# if there is an extra trailing delimiter, drop the resulting NaN column
if np.isnan(X[-1, -1]):
- X = np.delete(X, -1, 1)
+ X = np.delete(X, -1, 1) # noqa: N806
# X = np.loadtxt(f, skiprows=header_count, delimiter=',')
except ValueError:
msg = 'Error: file format is not supported ' + text_dir
errlog.exit(msg)
if X.ndim == 1:
- X = np.array([X]).transpose()
+ X = np.array([X]).transpose() # noqa: N806
return X
-def closest_node(node, nodes, ll):
+def closest_node(node, nodes, ll): # noqa: D103
nodes = np.asarray(nodes)
deltas = nodes - node
deltas_norm = np.zeros(deltas.shape)
@@ -2324,14 +2324,14 @@ def closest_node(node, nodes, ll):
return np.argmin(dist_2)
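`closest_node` relies on `np.einsum('ij,ij->i', d, d)`, which is the vectorised row-wise squared norm. A quick self-check of that identity:

```python
import numpy as np

d = np.arange(6.0).reshape(3, 2)
row_sq_norms = np.einsum('ij,ij->i', d, d)  # dot(r, r) for each row r
assert np.allclose(row_sq_norms, (d * d).sum(axis=1))
```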
-def imse(m_tmp, xcandi, xq, phiqr, i):
- X = m_tmp.X
- Y = m_tmp.Y
- X_tmp = np.vstack([X, xcandi])
- Y_tmp = np.zeros((Y.shape[0] + 1, Y.shape[1])) # any variables
+def imse(m_tmp, xcandi, xq, phiqr, i): # noqa: D103
+ X = m_tmp.X # noqa: N806
+ Y = m_tmp.Y # noqa: N806
+ X_tmp = np.vstack([X, xcandi]) # noqa: N806
+ Y_tmp = np.zeros((Y.shape[0] + 1, Y.shape[1])) # any variables # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
- dummy, Yq_var = m_tmp.predict(xq)
- IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten())
+ dummy, Yq_var = m_tmp.predict(xq) # noqa: N806
+ IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten()) # noqa: N806
return IMSEc1, i
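`imse()` scores a candidate point by tentatively adding it to the design and averaging the weighted predictive variance over the integration points, i.e. a Monte-Carlo estimate of a weighted integrated mean-squared error:

```latex
% phi_q^r = phiqr, the weight attached to integration point x_q;
% sigma^2 is the predictive variance after adding candidate x* to design X
\mathrm{IMSE}(x^{*}) \approx \frac{1}{n_q}\sum_{q=1}^{n_q}
    \phi_q^{\,r}\,\sigma^{2}_{X\cup\{x^{*}\}}(x_q)
```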
@@ -2339,32 +2339,32 @@ def imse(m_tmp, xcandi, xq, phiqr, i):
# ==========================================================================================
-class errorLog_in_pool:
+class errorLog_in_pool: # noqa: D101
def __init__(self, work_dir):
- self.file = open(f'{work_dir}/dakota.err', 'w')
+ self.file = open(f'{work_dir}/dakota.err', 'w') # noqa: SIM115, PTH123
- def write(self, msg):
- print(msg)
+ def write(self, msg): # noqa: D102
+ print(msg) # noqa: T201
self.file.write(msg)
self.file.close()
- raise WorkerStopException()
+ raise WorkerStopException() # noqa: RSE102, F821
# exit(-1)
- def terminate(self):
+ def terminate(self): # noqa: D102
self.file.close()
-def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
+def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type): # noqa: N803, D103
# t_total = time.process_time()
filename = 'SimGpModel'
- print('FILE: ' + work_dir + '/templatedir/' + inputFile)
- f = open(work_dir + '/templatedir/' + inputFile)
+ print('FILE: ' + work_dir + '/templatedir/' + inputFile) # noqa: T201
+ f = open(work_dir + '/templatedir/' + inputFile) # noqa: SIM115, PTH123
try:
inp = json.load(f)
except ValueError:
msg = 'invalid json format - ' + inputFile
- errlog.exit(msg)
+ errlog.exit(msg) # noqa: F821
f.close()
@@ -2374,7 +2374,7 @@ def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
+ inp['UQ_Method']['uqType']
+ '> but called program'
)
- errlog.exit(msg)
+ errlog.exit(msg) # noqa: F821
gp = GpFromModel(
work_dir,
@@ -2383,7 +2383,7 @@ def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
run_type,
os_type,
inp,
- errlog,
+ errlog, # noqa: F821
)
gp.save_model(filename)
gp.term()
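The F821 tags in this file mark genuine undefined-name findings, not style noise: `errorLog` and `errlog` are referenced in `run_FEM` and `build_surrogate` without being defined in this module, so those error paths would raise `NameError` before any message is logged. A hypothetical repair constructs the pool-safe logger in scope instead of silencing the rule:

```python
def build_surrogate(work_dir, input_file, workflow_driver, os_type, run_type):
    errlog = errorLog_in_pool(work_dir)  # class defined later in this module
    ...
```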
@@ -2396,13 +2396,13 @@ def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
# the actual execution
if __name__ == '__main__':
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816
work_dir = inputArgs[1].replace(os.sep, '/')
# errlog = errorLog(work_dir)
- inputFile = inputArgs[2]
- workflowDriver = inputArgs[3]
+ inputFile = inputArgs[2] # noqa: N816
+ workflowDriver = inputArgs[3] # noqa: N816
os_type = inputArgs[4]
run_type = inputArgs[5]
diff --git a/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild_old.py b/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild_old.py
index be1433998..1bbb44ba9 100644
--- a/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild_old.py
+++ b/modules/performUQ/SimCenterUQ/notBeingUsed/surrogateBuild_old.py
@@ -1,4 +1,4 @@
-import glob
+import glob # noqa: INP001, D100
import json
import math
import os
@@ -12,7 +12,7 @@
from copy import deepcopy
import emukit.multi_fidelity as emf
-import GPy as GPy
+import GPy as GPy # noqa: PLC0414
import numpy as np
from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
from emukit.multi_fidelity.convert_lists_to_array import (
@@ -22,12 +22,12 @@
from scipy.stats import lognorm, norm
-class GpFromModel:
- def __init__(
+class GpFromModel: # noqa: D101
+ def __init__( # noqa: C901, PLR0912, PLR0915
self,
work_dir,
- inputFile,
- workflowDriver,
+ inputFile, # noqa: N803
+ workflowDriver, # noqa: N803
run_type,
os_type,
inp,
@@ -45,12 +45,12 @@ def __init__(
# From external READ JSON FILE
#
- rv_name = list()
- self.g_name = list()
+ rv_name = list() # noqa: C408
+ self.g_name = list() # noqa: C408
x_dim = 0
y_dim = 0
for rv in inp['randomVariables']:
- rv_name = rv_name + [rv['name']]
+ rv_name = rv_name + [rv['name']] # noqa: RUF005
x_dim += 1
if x_dim == 0:
@@ -59,11 +59,11 @@ def __init__(
for g in inp['EDP']:
if g['length'] == 1: # scalar
- self.g_name = self.g_name + [g['name']]
+ self.g_name = self.g_name + [g['name']] # noqa: RUF005
y_dim += 1
else: # vector
for nl in range(g['length']):
- self.g_name = self.g_name + ['{}_{}'.format(g['name'], nl + 1)]
+ self.g_name = self.g_name + ['{}_{}'.format(g['name'], nl + 1)] # noqa: RUF005
y_dim += 1
if y_dim == 0:
@@ -79,11 +79,11 @@ def __init__(
self.do_predictive = False
automate_doe = False
- surrogateInfo = inp['UQ_Method']['surrogateMethodInfo']
+ surrogateInfo = inp['UQ_Method']['surrogateMethodInfo'] # noqa: N806
try:
self.do_parallel = surrogateInfo['parallelExecution']
- except:
+ except: # noqa: E722
self.do_parallel = True
if self.do_parallel:
@@ -101,8 +101,8 @@ def __init__(
self.pool = MPIPoolExecutor()
self.n_processor = self.world.Get_size()
# self.n_processor =20
- print('nprocessor :')
- print(self.n_processor)
+ print('nprocessor :') # noqa: T201
+ print(self.n_processor) # noqa: T201
# self.cal_interval = 5
self.cal_interval = self.n_processor
@@ -116,8 +116,8 @@ def __init__(
do_simulation = True
self.use_existing = surrogateInfo['existingDoE']
if self.use_existing:
- self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in')
- self.outData = os.path.join(work_dir, 'templatedir/outFile.in')
+ self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in') # noqa: PTH118
+ self.outData = os.path.join(work_dir, 'templatedir/outFile.in') # noqa: PTH118
thr_count = surrogateInfo['samples'] # number of samples
if surrogateInfo['advancedOpt']:
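The PTH118 tags keep `os.path.join`; for reference, the pathlib equivalent the rule points to (hypothetical directory):

```python
from pathlib import Path

work_dir = '/tmp/job'  # hypothetical
inp_data = Path(work_dir) / 'templatedir' / 'inpFile.in'
out_data = Path(work_dir) / 'templatedir' / 'outFile.in'
```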
@@ -140,10 +140,10 @@ def __init__(
self.doe_method = 'None' # default
do_doe = False
# self.inpData = surrogateInfo['inpFile']
- self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in')
+ self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in') # noqa: PTH118
if not do_simulation:
# self.outData = surrogateInfo['outFile']
- self.outData = os.path.join(work_dir, 'templatedir/outFile.in')
+ self.outData = os.path.join(work_dir, 'templatedir/outFile.in') # noqa: PTH118
elif surrogateInfo['method'] == 'Import Multi-fidelity Data File':
self.do_mf = True
@@ -156,15 +156,15 @@ def __init__(
self.use_existing_hf = surrogateInfo['existingDoE_HF']
self.samples_hf = surrogateInfo['samples_HF']
if self.use_existing_hf:
- self.inpData = os.path.join(
+ self.inpData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/inpFile_HF.in'
)
- self.outData = os.path.join(
+ self.outData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/outFile_HF.in'
)
else:
- self.inpData_hf = os.path.join(work_dir, 'templatedir/inpFile_HF.in')
- self.outData_hf = os.path.join(work_dir, 'templatedir/outFile_HF.in')
+ self.inpData_hf = os.path.join(work_dir, 'templatedir/inpFile_HF.in') # noqa: PTH118
+ self.outData_hf = os.path.join(work_dir, 'templatedir/outFile_HF.in') # noqa: PTH118
self.X_hf = read_txt(self.inpData_hf, errlog)
self.Y_hf = read_txt(self.outData_hf, errlog)
if self.X_hf.shape[0] != self.Y_hf.shape[0]:
@@ -175,15 +175,15 @@ def __init__(
self.use_existing_lf = surrogateInfo['existingDoE_LF']
self.samples_lf = surrogateInfo['samples_LF']
if self.use_existing_lf:
- self.inpData = os.path.join(
+ self.inpData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/inpFile_LF.in'
)
- self.outData = os.path.join(
+ self.outData = os.path.join( # noqa: PTH118
work_dir, 'templatedir/outFile_LF.in'
)
else:
- self.inpData_lf = os.path.join(work_dir, 'templatedir/inpFile_LF.in')
- self.outData_lf = os.path.join(work_dir, 'templatedir/outFile_LF.in')
+ self.inpData_lf = os.path.join(work_dir, 'templatedir/inpFile_LF.in') # noqa: PTH118
+ self.outData_lf = os.path.join(work_dir, 'templatedir/outFile_LF.in') # noqa: PTH118
self.X_lf = read_txt(self.inpData_lf, errlog)
self.Y_lf = read_txt(self.outData_lf, errlog)
if self.X_lf.shape[0] != self.Y_lf.shape[0]:
@@ -267,19 +267,19 @@ def __init__(
errlog.exit(msg)
if nugget_opt == 'Fixed Values':
- for Vals in self.nuggetVal:
+ for Vals in self.nuggetVal: # noqa: N806
if not np.isscalar(Vals):
msg = 'Error reading json: provide nugget values of each QoI with comma delimiter'
errlog.exit(msg)
elif nugget_opt == 'Fixed Bounds':
- for Bous in self.nuggetVal:
+ for Bous in self.nuggetVal: # noqa: N806
if np.isscalar(Bous):
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif isinstance(Bous, list):
msg = 'Error reading json: provide both lower and upper bounds of nugget'
errlog.exit(msg)
- elif Bous.shape[0] != 2:
+ elif Bous.shape[0] != 2: # noqa: PLR2004
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif Bous[0] > Bous[1]:
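PLR2004 objects to the bare `2` in the shape comparison; naming the constant records that a bounds entry must be a [lower, upper] pair. A sketch of that fix:

```python
import numpy as np

BOUND_PAIR = 2  # a nugget-bounds entry holds exactly [lower, upper]
bounds = np.array([0.0, 1.0])  # hypothetical bounds for one QoI
if bounds.shape[0] != BOUND_PAIR:
    raise ValueError('provide both lower and upper nugget bounds')
```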
@@ -313,7 +313,7 @@ def __init__(
#
if do_sampling:
- thr_NRMSE = surrogateInfo['accuracyLimit']
+ thr_NRMSE = surrogateInfo['accuracyLimit'] # noqa: N806
thr_t = surrogateInfo['timeLimit'] * 60
np.random.seed(surrogateInfo['seed'])
@@ -337,8 +337,8 @@ def __init__(
#
if self.use_existing:
- X_tmp = read_txt(self.inpData, errlog)
- Y_tmp = read_txt(self.outData, errlog)
+ X_tmp = read_txt(self.inpData, errlog) # noqa: N806
+ Y_tmp = read_txt(self.outData, errlog) # noqa: N806
n_ex = X_tmp.shape[0]
if self.do_mf:
@@ -368,8 +368,8 @@ def __init__(
# msg = 'Error reading json: # of initial DoE should be greater than 0'
# errlog.exit(msg)
user_init = -1
- X_tmp = np.zeros((0, x_dim))
- Y_tmp = np.zeros((0, y_dim))
+ X_tmp = np.zeros((0, x_dim)) # noqa: N806
+ Y_tmp = np.zeros((0, y_dim)) # noqa: N806
if user_init < 0:
n_init_ref = min(4 * x_dim, thr_count + n_ex - 1, 500)
@@ -387,7 +387,7 @@ def __init__(
n_iter = thr_count - n_init
- def FEM_batch(Xs, id_sim):
+ def FEM_batch(Xs, id_sim): # noqa: N802, N803
return run_FEM_batch(
Xs,
id_sim,
@@ -405,14 +405,14 @@ def FEM_batch(Xs, id_sim):
# check validity of datafile
if n_ex > 0:
# Y_test, self.id_sim = FEM_batch(X_tmp[0, :][np.newaxis], self.id_sim)
- # TODO : Fix this
- print(X_tmp[0, :][np.newaxis].shape)
- X_test, Y_test, self.id_sim = FEM_batch(
+ # TODO : Fix this # noqa: TD002
+ print(X_tmp[0, :][np.newaxis].shape) # noqa: T201
+ X_test, Y_test, self.id_sim = FEM_batch( # noqa: N806
X_tmp[0, :][np.newaxis], self.id_sim
)
if (
np.sum(
- abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01,
+ abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01, # noqa: PLR2004
axis=1,
)
> 0
@@ -429,17 +429,17 @@ def FEM_batch(Xs, id_sim):
#
if n_init > 0:
- U = lhs(x_dim, samples=(n_init))
- X = np.vstack([X_tmp, np.zeros((n_init, x_dim))])
+ U = lhs(x_dim, samples=(n_init)) # noqa: N806
+ X = np.vstack([X_tmp, np.zeros((n_init, x_dim))]) # noqa: N806
for nx in range(x_dim):
X[n_ex : n_ex + n_init, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
+ self.xrange[nx, 0]
)
else:
- X = X_tmp
+ X = X_tmp # noqa: N806
- if sum(abs(self.len / self.xrange[:, 0]) < 1.0e-7) > 1:
+ if sum(abs(self.len / self.xrange[:, 0]) < 1.0e-7) > 1: # noqa: PLR2004
msg = 'Error : upperbound and lowerbound should not be the same'
errlog.exit(msg)
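The initial design above maps unit-hypercube Latin hypercube samples onto the physical ranges one dimension at a time. A standalone sketch of the same transform, assuming pyDOE's `lhs` (which matches the call signature used here):

```python
import numpy as np
from pyDOE import lhs  # assumed to be the sampler behind lhs() in this module

lo = np.array([0.0, 10.0])  # hypothetical xrange[:, 0]
hi = np.array([1.0, 20.0])  # hypothetical xrange[:, 1]
U = lhs(2, samples=5)       # stratified samples in [0, 1]^2
X = U * (hi - lo) + lo      # affine map onto [lo, hi] per dimension
```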
@@ -447,14 +447,14 @@ def FEM_batch(Xs, id_sim):
else:
n_ex = 0
- thr_NRMSE = 0.02 # default
+ thr_NRMSE = 0.02 # default # noqa: N806
thr_t = float('inf')
#
# Read sample locations from directory
#
- X = read_txt(self.inpData, errlog)
+ X = read_txt(self.inpData, errlog) # noqa: N806
if self.do_mf:
if X.shape[1] != self.X_hf.shape[1]:
@@ -474,7 +474,7 @@ def FEM_batch(Xs, id_sim):
# give error
- if thr_count <= 2:
+ if thr_count <= 2: # noqa: PLR2004
msg = 'Number of samples should be greater than 2.'
errlog.exit(msg)
@@ -501,21 +501,21 @@ def FEM_batch(Xs, id_sim):
#
# SimCenter workflow setting
#
- if os.path.exists(f'{work_dir}/workdir.1'):
+ if os.path.exists(f'{work_dir}/workdir.1'): # noqa: PTH110
is_left = True
idx = 0
def change_permissions_recursive(path, mode):
- for root, dirs, files in os.walk(path, topdown=False):
- for dir in [os.path.join(root, d) for d in dirs]:
- os.chmod(dir, mode)
- for file in [os.path.join(root, f) for f in files]:
- os.chmod(file, mode)
+ for root, dirs, files in os.walk(path, topdown=False): # noqa: B007
+ for dir in [os.path.join(root, d) for d in dirs]: # noqa: A001, PTH118
+ os.chmod(dir, mode) # noqa: PTH101
+ for file in [os.path.join(root, f) for f in files]: # noqa: PTH118
+ os.chmod(file, mode) # noqa: PTH101
while is_left:
idx = idx + 1
try:
- if os.path.exists(
+ if os.path.exists( # noqa: PTH110
f'{work_dir}/workdir.{idx}/{workflowDriver}'
):
# os.chmod('{}/workdir.{}'.format(work_dir, idx), 777)
@@ -523,33 +523,33 @@ def change_permissions_recursive(path, mode):
f'{work_dir}/workdir.{idx}', 0o777
)
my_dir = f'{work_dir}/workdir.{idx}'
- os.chmod(my_dir, 0o777)
+ os.chmod(my_dir, 0o777) # noqa: S103, PTH101
shutil.rmtree(my_dir)
# shutil.rmtree('{}/workdir.{}'.format(work_dir, idx), ignore_errors=False, onerror=handleRemoveReadonly)
- except Exception as ex:
- print(ex)
+ except Exception as ex: # noqa: BLE001
+ print(ex) # noqa: T201
is_left = True
break
- print('Cleaned the working directory')
+ print('Cleaned the working directory') # noqa: T201
else:
- print('Work directory is clean')
+ print('Work directory is clean') # noqa: T201
- if os.path.exists(f'{work_dir}/dakotaTab.out'):
- os.remove(f'{work_dir}/dakotaTab.out')
+ if os.path.exists(f'{work_dir}/dakotaTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/dakotaTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/inputTab.out'):
- os.remove(f'{work_dir}/inputTab.out')
+ if os.path.exists(f'{work_dir}/inputTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/inputTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/outputTab.out'):
- os.remove(f'{work_dir}/outputTab.out')
+ if os.path.exists(f'{work_dir}/outputTab.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/outputTab.out') # noqa: PTH107
- if os.path.exists(f'{work_dir}/SimGpModel.pkl'):
- os.remove(f'{work_dir}/SimGpModel.pkl')
+ if os.path.exists(f'{work_dir}/SimGpModel.pkl'): # noqa: PTH110
+ os.remove(f'{work_dir}/SimGpModel.pkl') # noqa: PTH107
- if os.path.exists(f'{work_dir}/verif.out'):
- os.remove(f'{work_dir}/verif.out')
+ if os.path.exists(f'{work_dir}/verif.out'): # noqa: PTH110
+ os.remove(f'{work_dir}/verif.out') # noqa: PTH107
# func = self.__run_FEM(X,self.id_sim, self.rv_name)
@@ -558,9 +558,9 @@ def change_permissions_recursive(path, mode):
#
t_tmp = time.time()
- X_fem, Y_fem, self.id_sim = FEM_batch(X[n_ex:, :], self.id_sim)
- Y = np.vstack((Y_tmp, Y_fem))
- X = np.vstack((X[0:n_ex, :], X_fem))
+ X_fem, Y_fem, self.id_sim = FEM_batch(X[n_ex:, :], self.id_sim) # noqa: N806
+ Y = np.vstack((Y_tmp, Y_fem)) # noqa: N806
+ X = np.vstack((X[0:n_ex, :], X_fem)) # noqa: N806
t_sim_all = time.time() - t_tmp
@@ -574,8 +574,8 @@ def change_permissions_recursive(path, mode):
#
if self.do_predictive:
n_pred = 100
- Xt = np.zeros((n_pred, x_dim))
- U = lhs(x_dim, samples=n_pred)
+ Xt = np.zeros((n_pred, x_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_pred) # noqa: N806
for nx in range(x_dim):
Xt[:, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
@@ -586,14 +586,14 @@ def change_permissions_recursive(path, mode):
# for ns in range(n_pred):
# Yt[ns, :],self.id_sim = run_FEM(Xt[ns, :][np.newaxis],self.id_sim, self.rv_name)
- Yt = np.zeros((n_pred, y_dim))
- Xt, Yt, self.id_sim = FEM_batch(Xt, self.id_sim)
+ Yt = np.zeros((n_pred, y_dim)) # noqa: N806
+ Xt, Yt, self.id_sim = FEM_batch(Xt, self.id_sim) # noqa: N806
else:
#
# READ SAMPLES FROM DIRECTORY
#
- Y = read_txt(self.outData, errlog)
+ Y = read_txt(self.outData, errlog) # noqa: N806
if self.do_mf:
if Y.shape[1] != self.Y_hf.shape[1]:
@@ -628,9 +628,9 @@ def change_permissions_recursive(path, mode):
if not self.do_mf:
kg = kr
- self.m_list = list()
+ self.m_list = list() # noqa: C408
for i in range(y_dim):
- self.m_list = self.m_list + [
+ self.m_list = self.m_list + [ # noqa: RUF005
GPy.models.GPRegression(
X,
Y[:, i][np.newaxis].transpose(),
@@ -640,7 +640,7 @@ def change_permissions_recursive(path, mode):
]
for parname in self.m_list[i].parameter_names():
if parname.endswith('lengthscale'):
- exec('self.m_list[i].' + parname + '=self.len')
+ exec('self.m_list[i].' + parname + '=self.len') # noqa: S102
else:
kgs = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()])
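The S102-tagged `exec` builds an assignment statement from a dotted GPy parameter name. An exec-free counterpart resolves the parent object first and then sets the leaf attribute (a sketch; the diff keeps `exec`):

```python
from functools import reduce

def set_param(model, dotted_name, value):
    """Assign `value` to e.g. model.rbf.lengthscale without exec()."""
    *parents, leaf = dotted_name.split('.')
    target = reduce(getattr, parents, model)  # walk the dotted path
    setattr(target, leaf, value)
```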
@@ -655,22 +655,22 @@ def change_permissions_recursive(path, mode):
msg = f'Error importing input data: dimension of low ({X.shape[1]}) and high ({self.X_hf.shape[1]}) fidelity models (datasets) are inconsistent'
errlog.exit(msg)
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list, Y_list = (
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ X_list, Y_list = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, self.X_hf], [Y, self.Y_hf]
)
)
elif self.mf_case == 'model-data':
- X_list, Y_list = (
+ X_list, Y_list = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X], [self.Y_lf, Y]
)
)
- self.m_list = list()
- for i in range(y_dim):
- self.m_list = self.m_list + [
+ self.m_list = list() # noqa: C408
+ for i in range(y_dim): # noqa: B007
+ self.m_list = self.m_list + [ # noqa: RUF005
GPyMultiOutputWrapper(
emf.models.GPyLinearMultiFidelityModel(
X_list, Y_list, kernel=kgs.copy(), n_fidelities=2
@@ -696,7 +696,7 @@ def change_permissions_recursive(path, mode):
break_doe = False
- print('======== RUNNING GP DoE ===========')
+ print('======== RUNNING GP DoE ===========') # noqa: T201
exit_code = 'count' # num iter
i = 0
x_new = np.zeros((0, x_dim))
@@ -705,9 +705,9 @@ def change_permissions_recursive(path, mode):
doe_off = False # set to True when the DoE loop should stop
while not doe_off:
- t = time.time()
+ t = time.time() # noqa: F841
if (
- self.doe_method == 'random'
+ self.doe_method == 'random' # noqa: PLR1714
or self.doe_method == 'pareto'
or np.mod(i, self.cal_interval) == 0
):
@@ -716,7 +716,7 @@ def change_permissions_recursive(path, mode):
do_cal = False
t_tmp = time.time()
- [x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = (
+ [x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = ( # noqa: N806
self.__design_of_experiments(
X,
Y,
@@ -732,33 +732,33 @@ def change_permissions_recursive(path, mode):
)
t_doe = time.time() - t_tmp
- print(f'DoE Time: {t_doe:.2f} s')
+ print(f'DoE Time: {t_doe:.2f} s') # noqa: T201
if automate_doe:
if t_doe > self.t_sim_each:
break_doe = True
- print('========>> DOE OFF')
+ print('========>> DOE OFF') # noqa: T201
n_left = n_iter - i
break
if not self.do_mf:
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y) # noqa: N806
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf) # noqa: N806
elif self.mf_case == 'model-data':
- NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
+ NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y) # noqa: N806
self.NRMSE_hist = np.vstack((self.NRMSE_hist, np.array(NRMSE_val)))
self.NRMSE_idx = np.vstack((self.NRMSE_idx, i))
if self.do_predictive:
- Yt_pred = np.zeros((n_pred, y_dim))
+ Yt_pred = np.zeros((n_pred, y_dim)) # noqa: N806
for ny in range(y_dim):
y_pred_tmp, dummy = self.__predict(self.m_list[ny], Xt)
Yt_pred[:, ny] = y_pred_tmp.transpose()
if self.do_logtransform:
- Yt_pred = np.exp(Yt_pred)
- NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt)
+ Yt_pred = np.exp(Yt_pred) # noqa: N806
+ NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt) # noqa: N806
self.NRMSE_pred_hist = np.vstack(
(self.NRMSE_pred_hist, np.array(NRMSE_pred_val))
)
@@ -803,16 +803,16 @@ def change_permissions_recursive(path, mode):
x_new, y_new, self.id_sim = FEM_batch(x_new, self.id_sim)
# print(">> {:.2f} s".format(time.time() - t_init))
- X = np.vstack([X, x_new])
- Y = np.vstack([Y, y_new])
+ X = np.vstack([X, x_new]) # noqa: N806
+ Y = np.vstack([Y, y_new]) # noqa: N806
- print('======== RUNNING GP Calibration ===========')
+ print('======== RUNNING GP Calibration ===========') # noqa: T201
# not used
if break_doe:
- X_tmp = np.zeros((n_left, x_dim))
- Y_tmp = np.zeros((n_left, y_dim))
- U = lhs(x_dim, samples=n_left)
+ X_tmp = np.zeros((n_left, x_dim)) # noqa: N806
+ Y_tmp = np.zeros((n_left, y_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_left) # noqa: N806
for nx in range(x_dim):
# X[:,nx] = np.random.uniform(xrange[nx,0], xrange[nx,1], (1, n_init))
X_tmp[:, nx] = (
@@ -820,7 +820,7 @@ def change_permissions_recursive(path, mode):
+ self.xrange[nx, 0]
)
- X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp, self.id_sim)
+ X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp, self.id_sim) # noqa: N806
# for ns in np.arange(n_left):
# Y_tmp[ns, :],self.id_sim = run_FEM(X_tmp[ns, :][np.newaxis],self.id_sim, self.rv_name)
@@ -830,8 +830,8 @@ def change_permissions_recursive(path, mode):
# Y_tmp = Y_tmp[:ns, :]
# break
- X = np.vstack((X, X_tmp))
- Y = np.vstack((Y, Y_tmp))
+ X = np.vstack((X, X_tmp)) # noqa: N806
+ Y = np.vstack((Y, Y_tmp)) # noqa: N806
do_doe = False
# if not do_doe:
@@ -886,16 +886,16 @@ def change_permissions_recursive(path, mode):
# plt.show()
# plt.plot(Y_cv[:,1], Y[:,1], 'x')
# plt.show()
- print(f'my exit code = {exit_code}')
- print(f'1. count = {self.id_sim}')
- print(f'2. max(NRMSE) = {np.max(NRMSE_val)}')
- print(f'3. time = {sim_time:.2f} s')
+ print(f'my exit code = {exit_code}') # noqa: T201
+ print(f'1. count = {self.id_sim}') # noqa: T201
+ print(f'2. max(NRMSE) = {np.max(NRMSE_val)}') # noqa: T201
+ print(f'3. time = {sim_time:.2f} s') # noqa: T201
# for user information
if do_simulation:
n_err = 1000
- Xerr = np.zeros((n_err, x_dim))
- U = lhs(x_dim, samples=n_err)
+ Xerr = np.zeros((n_err, x_dim)) # noqa: N806
+ U = lhs(x_dim, samples=n_err) # noqa: N806
for nx in range(x_dim):
Xerr[:, nx] = (
U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0])
@@ -936,7 +936,7 @@ def change_permissions_recursive(path, mode):
# exec('y_pred_prior_var[ns,ny]=m_tmp.' + parname)
# error_ratio1_Pr = (y_pred_var / y_pred_prior_var)
- error_ratio2_Pr = y_pred_var / y_data_var
+ error_ratio2_Pr = y_pred_var / y_data_var # noqa: N806
# np.max(error_ratio1_Pr, axis=0)
np.max(error_ratio2_Pr, axis=0)
@@ -948,14 +948,14 @@ def change_permissions_recursive(path, mode):
self.perc_thr = 1 - (self.perc_thr) * 0.001 # ratio=simulation/sampling
corr_val = np.zeros((y_dim,))
- R2_val = np.zeros((y_dim,))
+ R2_val = np.zeros((y_dim,)) # noqa: N806
for ny in range(y_dim):
if not self.do_mf:
- Y_ex = Y[:, ny]
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- Y_ex = self.Y_hf[:, ny]
+ Y_ex = Y[:, ny] # noqa: N806
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ Y_ex = self.Y_hf[:, ny] # noqa: N806
elif self.mf_case == 'model-data':
- Y_ex = Y[:, ny]
+ Y_ex = Y[:, ny] # noqa: N806
corr_val[ny] = np.corrcoef(Y_ex, Y_cv[:, ny])[0, 1]
R2_val[ny] = 1 - np.sum(pow(Y_cv[:, ny] - Y_ex, 2)) / np.sum(
@@ -996,24 +996,24 @@ def change_permissions_recursive(path, mode):
self.rvDist = []
self.rvVal = []
for nx in range(x_dim):
- rvInfo = inp['randomVariables'][nx]
- self.rvName = self.rvName + [rvInfo['name']]
- self.rvDist = self.rvDist + [rvInfo['distribution']]
+ rvInfo = inp['randomVariables'][nx] # noqa: N806
+ self.rvName = self.rvName + [rvInfo['name']] # noqa: RUF005
+ self.rvDist = self.rvDist + [rvInfo['distribution']] # noqa: RUF005
if do_sampling:
- self.rvVal = self.rvVal + [
+ self.rvVal = self.rvVal + [ # noqa: RUF005
(rvInfo['upperbound'] + rvInfo['lowerbound']) / 2
]
else:
- self.rvVal = self.rvVal + [np.mean(X[:, nx])]
+ self.rvVal = self.rvVal + [np.mean(X[:, nx])] # noqa: RUF005
- def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
+ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt): # noqa: ARG002, C901
warnings.filterwarnings('ignore')
t_opt = time.time()
- m_list = list()
+ m_list = list() # noqa: C408
for ny in range(self.y_dim):
- print(f'y dimension {ny}:')
+ print(f'y dimension {ny}:') # noqa: T201
nopt = 10
#
@@ -1052,7 +1052,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
m = m_tmp.copy()
id_opt = 1
- print(f'{1} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}')
+ print(f'{1} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}') # noqa: T201
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
@@ -1064,7 +1064,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- exec('m_tmp.' + parname + '=self.len')
+ exec('m_tmp.' + parname + '=self.len') # noqa: S102
if nugget_opt_tmp == 'Optimize':
m_tmp['Gaussian_noise.variance'].unfix()
@@ -1088,7 +1088,7 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
m = m_tmp.copy()
id_opt = 1
- print(f'{2} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}')
+ print(f'{2} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}') # noqa: T201
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_unfix))
if time.time() - t_unfix > self.t_sim_each:
@@ -1100,14 +1100,14 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
if math.isnan(m.log_likelihood()):
- exec(
+ exec( # noqa: S102
'm_tmp.'
+ parname
+ '=np.random.exponential(1, (1, x_dim)) * m_init.'
+ parname
)
else:
- exec(
+ exec( # noqa: S102
'm_tmp.'
+ parname
+ '=np.random.exponential(1, (1, x_dim)) * m.'
@@ -1127,15 +1127,15 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
elif nugget_opt_tmp == 'Zero':
m_tmp['Gaussian_noise.variance'].constrain_fixed(0)
- t_fix = time.time()
+ t_fix = time.time() # noqa: F841
try:
m_tmp.optimize()
# m_tmp.optimize_restarts(5)
- except Exception as ex:
- print(f'OS error: {ex}')
+ except Exception as ex: # noqa: BLE001
+ print(f'OS error: {ex}') # noqa: T201
- print(
+ print( # noqa: T201
f'{no + 3} among {nopt} Log-Likelihood: {m_tmp.log_likelihood()}'
)
# print(' Calibration time for each: {:.2f} s'.format(time.time() - t_fix))
@@ -1155,8 +1155,8 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
msg = f'Error GP optimization failed for QoI #{ny + 1}'
self.errlog.exit(msg)
- m_list = m_list + [m]
- print(m)
+ m_list = m_list + [m] # noqa: RUF005
+ print(m) # noqa: T201
else:
if nugget_opt_tmp == 'Optimize':
m_tmp_list[ny].gpy_model.mixed_noise.Gaussian_noise.unfix()
@@ -1203,16 +1203,16 @@ def __parameter_calibration(self, m_tmp_list, x_dim, nugget_opt):
id_opt = 0
self.calib_time = (time.time() - t_opt) * round(10 / nopt)
- print(f' Calibration time: {self.calib_time:.2f} s, id_opt={id_opt}')
+ print(f' Calibration time: {self.calib_time:.2f} s, id_opt={id_opt}') # noqa: T201
return m_tmp_list
- def __design_of_experiments(
+ def __design_of_experiments( # noqa: C901, PLR0915
self,
- X,
- Y,
+ X, # noqa: N803
+ Y, # noqa: N803
ac,
- ar,
+ ar, # noqa: ARG002
n_candi,
n_integ,
pre_m_list,
@@ -1225,10 +1225,10 @@ def __design_of_experiments(
if np.min(Y) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
errlog.exit(msg)
- Y = np.log(Y)
+ Y = np.log(Y) # noqa: N806
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
if np.min(self.Y_hf) < 0:
msg = 'Error running SimCenterUQ. Response contains negative values. Please uncheck the log-transform option in the UQ tab'
errlog.exit(msg)
@@ -1250,8 +1250,8 @@ def __design_of_experiments(
if not self.do_mf:
m_tmp_list[i].set_XY(X, Y[:, i][np.newaxis].transpose())
else:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list_tmp, Y_list_tmp = (
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, self.X_hf],
[
@@ -1261,7 +1261,7 @@ def __design_of_experiments(
)
)
elif self.mf_case == 'model-data':
- X_list_tmp, Y_list_tmp = (
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X],
[
@@ -1281,21 +1281,21 @@ def __design_of_experiments(
#
# cross validation errors
#
- Y_pred, Y_pred_var, e2 = self.__get_cross_validation(X, Y, m_list)
+ Y_pred, Y_pred_var, e2 = self.__get_cross_validation(X, Y, m_list) # noqa: N806
if self.do_logtransform:
mu = Y_pred
sig2 = Y_pred_var
median = np.exp(mu)
- mean = np.exp(mu + sig2 / 2)
+ mean = np.exp(mu + sig2 / 2) # noqa: F841
var = np.exp(2 * mu + sig2) * (np.exp(sig2) - 1)
- Y_pred = median
- Y_pred_var = var
+ Y_pred = median # noqa: N806
+ Y_pred_var = var # noqa: N806
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
self.Y_hf = np.exp(self.Y_hf)
elif self.mf_case == 'model-data':
self.Y_lf = np.exp(self.Y_lf)
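The log-transform branch above back-transforms the GP's Gaussian prediction on log Y using the standard lognormal identities, reporting the median rather than the mean (the `mean` variable is computed but unused, hence the F841 tag):

```latex
% With \log Y \sim \mathcal{N}(\mu, \sigma^2):
\operatorname{median}(Y) = e^{\mu}, \qquad
\mathbb{E}[Y] = e^{\mu + \sigma^{2}/2}, \qquad
\operatorname{Var}(Y) = e^{2\mu + \sigma^{2}}\bigl(e^{\sigma^{2}} - 1\bigr)
```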
@@ -1340,17 +1340,17 @@ def __design_of_experiments(
#
yc1_pred, yc1_var = self.__predict(m_idx, xc1) # use only variance
- score1 = np.zeros(yc1_pred.shape)
+ score1 = np.zeros(yc1_pred.shape) # noqa: F841
cri1 = np.zeros(yc1_pred.shape)
cri2 = np.zeros(yc1_pred.shape)
- # TODO: is this the best?
+ # TODO: is this the best? # noqa: TD002
ll = self.xrange[:, 1] - self.xrange[:, 0]
for i in range(nc1):
if not self.do_mf:
wei = self.weights_node2(xc1[i, :], X, ll)
# phi = e2[closest_node(xc1[i, :], X, ll)]
# phi = e2[self.__closest_node(xc1[i, :], X)]
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
wei = self.weights_node2(xc1[i, :], self.X_hf, ll)
# phi = e2[closest_node(xc1[i, :], self.X_hf, ll)]
# phi = e2[self.__closest_node(xc1[i, :], self.X_hf)]
@@ -1363,7 +1363,7 @@ def __design_of_experiments(
cri2[i] = sum(e2[:, y_idx] / Y_pred_var[:, y_idx] * wei.T)
# cri2[i] = pow(phi[y_idx],r)
- VOI = np.zeros(yc1_pred.shape)
+ VOI = np.zeros(yc1_pred.shape) # noqa: N806
for i in range(nc1):
pdfvals = (
m_idx.kern.K(np.array([xq[i]]), xq) ** 2
@@ -1380,11 +1380,11 @@ def __design_of_experiments(
logcrimi1 = np.log(cri1[:, 0])
logcrimi2 = np.log(cri2[:, 0])
- idx_pareto_front = list()
+ idx_pareto_front = list() # noqa: C408, F841
rankid = np.zeros(nc1)
- varRank = np.zeros(nc1)
- biasRank = np.zeros(nc1)
- for id in range(nc1):
+ varRank = np.zeros(nc1) # noqa: N806
+ biasRank = np.zeros(nc1) # noqa: N806
+ for id in range(nc1): # noqa: A001
idx_tmp = np.argwhere(
(logcrimi1 >= logcrimi1[id]) * (logcrimi2 >= logcrimi2[id])
)
@@ -1392,11 +1392,11 @@ def __design_of_experiments(
biasRank[id] = np.sum(logcrimi2 >= logcrimi2[id])
rankid[id] = idx_tmp.size
- idx_rank = np.argsort(rankid)
- sort_rank = np.sort(rankid)
+ idx_rank = np.argsort(rankid) # noqa: F841
+ sort_rank = np.sort(rankid) # noqa: F841
num_1rank = np.sum(rankid == 1)
idx_1rank = list((np.argwhere(rankid == 1)).flatten())
- npareto = 4
+ npareto = 4 # noqa: F841
if num_1rank < self.cal_interval:
prob = np.ones((nc1,))
@@ -1407,8 +1407,8 @@ def __design_of_experiments(
)
else:
idx_pareto_candi = idx_1rank.copy()
- X_tmp = X
- Y_tmp = Y[:, y_idx][np.newaxis].T
+ X_tmp = X # noqa: N806
+ Y_tmp = Y[:, y_idx][np.newaxis].T # noqa: N806
m_tmp = m_idx.copy()
# get MMSEw
@@ -1420,12 +1420,12 @@ def __design_of_experiments(
idx_pareto_new = [best_global]
del idx_pareto_candi[best_local]
- for i in range(self.cal_interval - 1):
- X_tmp = np.vstack([X_tmp, xc1[best_global, :][np.newaxis]])
+ for i in range(self.cal_interval - 1): # noqa: B007
+ X_tmp = np.vstack([X_tmp, xc1[best_global, :][np.newaxis]]) # noqa: N806
# any variables
- Y_tmp = np.vstack([Y_tmp, np.array([[0]])])
+ Y_tmp = np.vstack([Y_tmp, np.array([[0]])]) # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
- dummy, Yq_var = m_tmp.predict(xc1[idx_pareto_candi, :])
+ dummy, Yq_var = m_tmp.predict(xc1[idx_pareto_candi, :]) # noqa: N806
cri1 = Yq_var * VOI[idx_pareto_candi]
cri1 = (cri1 - np.min(cri1)) / (np.max(cri1) - np.min(cri1))
score_tmp = (
@@ -1434,7 +1434,7 @@ def __design_of_experiments(
best_local = np.argsort(-np.squeeze(score_tmp))[0]
best_global = idx_pareto_candi[best_local]
- idx_pareto_new = idx_pareto_new + [best_global]
+ idx_pareto_new = idx_pareto_new + [best_global] # noqa: RUF005
del idx_pareto_candi[best_local]
# score_tmp = Yq_var * cri2[idx_pareto_left]/Y_pred_var[closest_node(xc1[i, :], X, self.m_list, self.xrange)]
@@ -1443,7 +1443,7 @@ def __design_of_experiments(
idx_pareto = idx_pareto_new
update_point = xc1[idx_pareto, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
# import matplotlib.pyplot as plt
# plt.plot(logcrimi1, logcrimi2, 'x');plt.plot(logcrimi1[idx_pareto], logcrimi2[idx_pareto], 'x'); plt.show()
@@ -1468,16 +1468,16 @@ def __design_of_experiments(
idx_pareto2 = np.asarray(random_indices)
idx_pareto = np.asarray(idx_pareto)
idx_pareto = list(idx_pareto[idx_pareto2[0:self.cal_interval]])
- """
+ """ # noqa: W293
elif self.doe_method == 'imsew':
nq = round(n_integ)
m_stack = m_idx.copy()
- X_stack = X
- Y_stack = Y
+ X_stack = X # noqa: N806
+ Y_stack = Y # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
- update_IMSE = np.zeros((self.cal_interval, 1))
+ update_IMSE = np.zeros((self.cal_interval, 1)) # noqa: N806
#
# Initial candidates
@@ -1499,7 +1499,7 @@ def __design_of_experiments(
self.xrange[nx, 0], self.xrange[nx, 1], (1, nq)
)
- # TODO: is diff(xrange) the best?
+ # TODO: is diff(xrange) the best? # noqa: TD002
ll = self.xrange[:, 1] - self.xrange[:, 0]
phiq = np.zeros((nq, y_dim))
for i in range(nq):
@@ -1514,29 +1514,29 @@ def __design_of_experiments(
for i in range(nc1)
)
result_objs = list(self.pool.starmap(imse, iterables))
- IMSEc1 = np.zeros(nc1)
- for IMSE_val, idx in result_objs:
+ IMSEc1 = np.zeros(nc1) # noqa: N806
+ for IMSE_val, idx in result_objs: # noqa: N806
IMSEc1[idx] = IMSE_val
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} in a parallel way.. time = {time.time() - tmp}'
) # 7s # 3-4s
else:
tmp = time.time()
phiqr = pow(phiq[:, y_idx], r)
- IMSEc1 = np.zeros(nc1)
+ IMSEc1 = np.zeros(nc1) # noqa: N806
for i in range(nc1):
IMSEc1[i], dummy = imse(
m_stack.copy(), xc1[i, :][np.newaxis], xq, phiqr, i
)
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} in a serial way.. time = {time.time() - tmp}'
) # 4s
new_idx = np.argmin(IMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
- Y_stack = np.zeros(
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
+ Y_stack = np.zeros( # noqa: N806
(Y_stack.shape[0] + 1, Y.shape[1])
) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
@@ -1627,11 +1627,11 @@ def __design_of_experiments(
update_point = xc3[new_idx, :][np.newaxis]
update_IMSE = IMSE[new_idx]
- """
+ """ # noqa: W293
elif self.doe_method == 'random':
update_point = xc1[0 : self.cal_interval, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
elif self.doe_method == 'mmse':
sort_idx_score1 = np.argsort(
@@ -1641,7 +1641,7 @@ def __design_of_experiments(
xc2 = xc1[sort_idx_score1[0, 0:nc2], :]
update_point = xc2[0:1, :]
- update_IMSE = 0
+ update_IMSE = 0 # noqa: N806
elif self.doe_method == 'mmsew':
#
@@ -1661,21 +1661,21 @@ def __design_of_experiments(
phicr = pow(phic[:, y_idx], r)
- X_stack = X
- Y_stack = Y
+ X_stack = X # noqa: N806
+ Y_stack = Y # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
- update_IMSE = np.zeros((self.cal_interval, 1))
+ update_IMSE = np.zeros((self.cal_interval, 1)) # noqa: N806
for ni in range(self.cal_interval):
yc1_pred, yc1_var = m_stack.predict(xc1) # use only variance
- MMSEc1 = yc1_var.flatten() * phicr.flatten()
+ MMSEc1 = yc1_var.flatten() * phicr.flatten() # noqa: N806
new_idx = np.argmax(MMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
- Y_stack = np.zeros(
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
+ Y_stack = np.zeros( # noqa: N806
(Y_stack.shape[0] + 1, Y.shape[1])
) # any variables
m_stack.set_XY(X=X_stack, Y=Y_stack)
@@ -1695,8 +1695,8 @@ def __design_of_experiments(
def __normalized_mean_sq_error(self, yp, ye):
nt = yp.shape[0]
data_bound = np.max(ye, axis=0) - np.min(ye, axis=0)
- RMSE = np.sqrt(1 / nt * np.sum(pow(yp - ye, 2), axis=0))
- NRMSE = RMSE / data_bound
+ RMSE = np.sqrt(1 / nt * np.sum(pow(yp - ye, 2), axis=0)) # noqa: N806
+ NRMSE = RMSE / data_bound # noqa: N806
NRMSE[np.argwhere(data_bound == 0)] = 0
return NRMSE
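For reference, `__normalized_mean_sq_error` computes a range-normalised RMSE per output column, with the final guard setting the entry to 0 whenever the data range is zero:

```latex
\mathrm{NRMSE}_j =
  \frac{\sqrt{\tfrac{1}{n_t}\sum_{i=1}^{n_t}\bigl(y^{p}_{ij}-y^{e}_{ij}\bigr)^{2}}}
       {\max_i y^{e}_{ij} - \min_i y^{e}_{ij}}
```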
@@ -1714,14 +1714,14 @@ def __closest_node(self, node, nodes):
dist_2 = np.einsum('ij,ij->i', deltas_norm, deltas_norm)
return np.argmin(dist_2)
- def __from_XY_into_list(self, X, Y):
- x_list = list()
- y_list = list()
+ def __from_XY_into_list(self, X, Y): # noqa: N802, N803
+ x_list = list() # noqa: C408
+ y_list = list() # noqa: C408
for i in range(Y.shape[1]):
- x_list = x_list + [
+ x_list = x_list + [ # noqa: RUF005
X,
]
- y_list = y_list + [
+ y_list = y_list + [ # noqa: RUF005
Y[
:,
[
@@ -1731,52 +1731,52 @@ def __from_XY_into_list(self, X, Y):
]
return x_list, y_list
- def __predict(self, m, X):
- if not self.do_mf:
+ def __predict(self, m, X): # noqa: N803
+ if not self.do_mf: # noqa: RET503
return m.predict(X)
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
- X_list = convert_x_list_to_array([X, X])
- X_list_l = X_list[: X.shape[0]]
- X_list_h = X_list[X.shape[0] :]
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: RET505, PLR1714
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806
+ X_list_l = X_list[: X.shape[0]] # noqa: N806
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
elif self.mf_case == 'model-data':
# return m.predict(X)
- X_list = convert_x_list_to_array([X, X])
- X_list_l = X_list[: X.shape[0]]
- X_list_h = X_list[X.shape[0] :]
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806
+ X_list_l = X_list[: X.shape[0]] # noqa: N806, F841
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m.predict(X_list_h)
- def __get_cross_validation(self, X, Y, m_list):
+ def __get_cross_validation(self, X, Y, m_list): # noqa: N803
if not self.do_mf:
e2 = np.zeros(Y.shape)
- Y_pred = np.zeros(Y.shape)
- Y_pred_var = np.zeros(Y.shape)
+ Y_pred = np.zeros(Y.shape) # noqa: N806
+ Y_pred_var = np.zeros(Y.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = m_list[ny].copy()
for ns in range(X.shape[0]):
- X_tmp = np.delete(X, ns, axis=0)
- Y_tmp = np.delete(Y, ns, axis=0)
+ X_tmp = np.delete(X, ns, axis=0) # noqa: N806
+ Y_tmp = np.delete(Y, ns, axis=0) # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp[:, ny][np.newaxis].transpose())
x_loo = X[ns, :][np.newaxis]
# Y_pred_tmp, Y_err_tmp = m_tmp.predict(x_loo)
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
(Y_pred[ns, ny] - Y[ns, ny]), 2
) # for nD outputs
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
e2 = np.zeros(self.Y_hf.shape)
- Y_pred = np.zeros(self.Y_hf.shape)
- Y_pred_var = np.zeros(self.Y_hf.shape)
+ Y_pred = np.zeros(self.Y_hf.shape) # noqa: N806
+ Y_pred_var = np.zeros(self.Y_hf.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(self.X_hf.shape[0]):
- X_hf_tmp = np.delete(self.X_hf, ns, axis=0)
- Y_hf_tmp = np.delete(self.Y_hf, ns, axis=0)
- X_list_tmp, Y_list_tmp = (
+ X_hf_tmp = np.delete(self.X_hf, ns, axis=0) # noqa: N806
+ Y_hf_tmp = np.delete(self.Y_hf, ns, axis=0) # noqa: N806
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[X, X_hf_tmp],
[
@@ -1787,7 +1787,7 @@ def __get_cross_validation(self, X, Y, m_list):
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
x_loo = self.X_hf[ns][np.newaxis]
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
@@ -1796,15 +1796,15 @@ def __get_cross_validation(self, X, Y, m_list):
elif self.mf_case == 'model-data':
e2 = np.zeros(Y.shape)
- Y_pred = np.zeros(Y.shape)
- Y_pred_var = np.zeros(Y.shape)
+ Y_pred = np.zeros(Y.shape) # noqa: N806
+ Y_pred_var = np.zeros(Y.shape) # noqa: N806
for ny in range(Y.shape[1]):
m_tmp = deepcopy(m_list[ny])
for ns in range(X.shape[0]):
- X_tmp = np.delete(X, ns, axis=0)
- Y_tmp = np.delete(Y, ns, axis=0)
- X_list_tmp, Y_list_tmp = (
+ X_tmp = np.delete(X, ns, axis=0) # noqa: N806
+ Y_tmp = np.delete(Y, ns, axis=0) # noqa: N806
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
emf.convert_lists_to_array.convert_xy_lists_to_arrays(
[self.X_lf, X_tmp],
[
@@ -1816,7 +1816,7 @@ def __get_cross_validation(self, X, Y, m_list):
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
# x_loo = np.hstack((X[ns], 1))[np.newaxis]
x_loo = self.X_hf[ns][np.newaxis]
- Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.__predict(m_tmp, x_loo) # noqa: N806
Y_pred[ns, ny] = Y_pred_tmp
Y_pred_var[ns, ny] = Y_err_tmp
e2[ns, ny] = pow(
@@ -1825,16 +1825,16 @@ def __get_cross_validation(self, X, Y, m_list):
return Y_pred, Y_pred_var, e2
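`__get_cross_validation` refits the GP once per training point. A standard shortcut worth noting (not used here): for a fixed-hyperparameter GP with noise-inclusive kernel matrix K and targets y, the leave-one-out mean and variance follow in closed form from a single inverse:

```latex
\mu_{-i} = y_i - \frac{\bigl[K^{-1}y\bigr]_i}{\bigl[K^{-1}\bigr]_{ii}},
\qquad
\sigma^{2}_{-i} = \frac{1}{\bigl[K^{-1}\bigr]_{ii}}
```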
- def term(self):
+ def term(self): # noqa: D102
if self.do_parallel:
if self.run_type != 'runningLocal':
- print('RUNNING SUCCESSFUL')
+ print('RUNNING SUCCESSFUL') # noqa: T201
self.world.Abort(0) # to prevent deadlock
- def save_model(self, filename):
+ def save_model(self, filename): # noqa: C901, D102, PLR0915
import json
- with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file:
+ with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file: # noqa: PTH123
pickle.dump(self.m_list, file)
# json.dump(self.m_list, file)
@@ -1964,7 +1964,7 @@ def save_model(self, filename):
for ny in range(self.y_dim):
if not self.do_mf:
results['yExact'][self.g_name[ny]] = self.Y[:, ny].tolist()
- elif self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ elif self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
results['yExact'][self.g_name[ny]] = self.Y_hf[:, ny].tolist()
elif self.mf_case == 'model-data':
results['yExact'][self.g_name[ny]] = self.Y[:, ny].tolist()
@@ -2037,7 +2037,7 @@ def save_model(self, filename):
results['outData'] = self.outData
if self.do_mf:
- if self.mf_case == 'data-model' or self.mf_case == 'data-data':
+ if self.mf_case == 'data-model' or self.mf_case == 'data-data': # noqa: PLR1714
results['inpData_HF'] = self.inpData_hf
results['outData_HF'] = self.outData_hf
results['valSamp_HF'] = self.X_hf.shape[0]
@@ -2053,7 +2053,7 @@ def save_model(self, filename):
rvs['name'] = self.rvName[nx]
rvs['distribution'] = self.rvDist[nx]
rvs['value'] = self.rvVal[nx]
- rv_list = rv_list + [rvs]
+ rv_list = rv_list + [rvs] # noqa: RUF005
results['randomVariables'] = rv_list
# Used for surrogate
@@ -2064,13 +2064,13 @@ def save_model(self, filename):
results['modelInfo'][self.g_name[ny]] = {}
for parname in self.m_list[ny].parameter_names():
results['modelInfo'][self.g_name[ny]][parname] = list(
- eval('self.m_list[ny].' + parname)
+ eval('self.m_list[ny].' + parname) # noqa: S307
)
- with open(self.work_dir + '/dakota.out', 'w') as fp:
+ with open(self.work_dir + '/dakota.out', 'w') as fp: # noqa: PTH123
json.dump(results, fp, indent=1)
- with open(self.work_dir + '/GPresults.out', 'w') as file:
+ with open(self.work_dir + '/GPresults.out', 'w') as file: # noqa: PTH123
file.write('* Problem setting\n')
file.write(f' - dimension of x : {self.x_dim}\n')
file.write(f' - dimension of y : {self.y_dim}\n')
@@ -2120,7 +2120,7 @@ def save_model(self, filename):
m_tmp = self.m_list[ny]
for parname in m_tmp.parameter_names():
file.write(f' - {parname} ')
- parvals = eval('m_tmp.' + parname)
+ parvals = eval('m_tmp.' + parname) # noqa: S307
if len(parvals) == self.x_dim:
file.write('\n')
for nx in range(self.x_dim):
@@ -2133,10 +2133,10 @@ def save_model(self, filename):
file.close()
- print('Results Saved')
+ print('Results Saved') # noqa: T201
return 0
- def weights_node2(self, node, nodes, ls):
+ def weights_node2(self, node, nodes, ls): # noqa: D102
nodes = np.asarray(nodes)
deltas = nodes - node
@@ -2152,8 +2152,8 @@ def weights_node2(self, node, nodes, ls):
return weig / sum(weig)
-def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
- X = np.atleast_2d(X)
+def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver): # noqa: N802, N803, D103
+ X = np.atleast_2d(X) # noqa: N806
x_dim = X.shape[1]
if X.shape[0] > 1:
@@ -2164,17 +2164,17 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
# (1) create the "workdir.idx" folder (note: C++17 is needed to use the filesystem namespace)
current_dir_i = work_dir + '/workdir.' + str(id_sim + 1)
- print(id_sim)
+ print(id_sim) # noqa: T201
try:
shutil.copytree(work_dir + '/templatedir', current_dir_i)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
errlog = errorLog(work_dir)
msg = 'Error running FEM: ' + str(ex)
errlog.exit(msg)
# (2) write the params.in file
- outF = open(current_dir_i + '/params.in', 'w')
+ outF = open(current_dir_i + '/params.in', 'w') # noqa: SIM115, PTH123, N806
outF.write(f'{x_dim}\n')
for i in range(x_dim):
@@ -2185,10 +2185,10 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
os.chdir(current_dir_i)
workflow_run_command = f'{current_dir_i}/{workflowDriver}'
- subprocess.check_call(workflow_run_command, shell=True)
+ subprocess.check_call(workflow_run_command, shell=True) # noqa: S602
# (4) read the results
- if glob.glob('results.out'):
+ if glob.glob('results.out'): # noqa: PTH207
g = np.loadtxt('results.out').flatten()
else:
errlog = errorLog(work_dir)
@@ -2210,20 +2210,20 @@ def run_FEM(X, id_sim, rv_name, work_dir, workflowDriver):
return g, id_sim
-def run_FEM_batch(
- X,
+def run_FEM_batch( # noqa: N802, D103
+ X, # noqa: N803
id_sim,
rv_name,
do_parallel,
y_dim,
- os_type,
- run_type,
+ os_type, # noqa: ARG001
+ run_type, # noqa: ARG001
pool,
t_init,
t_thr,
- workflowDriver,
+ workflowDriver, # noqa: N803
):
- X = np.atleast_2d(X)
+ X = np.atleast_2d(X) # noqa: N806
# Windows
# if os_type.lower().startswith('win'):
# workflowDriver = "workflow_driver.bat"
@@ -2232,20 +2232,20 @@ def run_FEM_batch(
nsamp = X.shape[0]
if not do_parallel:
- Y = np.zeros((nsamp, y_dim))
+ Y = np.zeros((nsamp, y_dim)) # noqa: N806
for ns in range(nsamp):
Y[ns, :], id_sim_current = run_FEM(
X[ns, :], id_sim + ns, rv_name, work_dir, workflowDriver
)
if time.time() - t_init > t_thr:
- X = X[:ns, :]
- Y = Y[:ns, :]
+ X = X[:ns, :] # noqa: N806
+ Y = Y[:ns, :] # noqa: N806
break
return X, Y, id_sim_current + 1
if do_parallel:
- print(f'Running {nsamp} simulations in parallel')
+ print(f'Running {nsamp} simulations in parallel') # noqa: T201
tmp = time.time()
iterables = (
(X[i, :][np.newaxis], id_sim + i, rv_name, work_dir, workflowDriver)
@@ -2253,67 +2253,67 @@ def run_FEM_batch(
)
try:
result_objs = list(pool.starmap(run_FEM, iterables))
- print(f'Simulation time = {time.time() - tmp} s')
+ print(f'Simulation time = {time.time() - tmp} s') # noqa: T201
tmp = time.time()
except KeyboardInterrupt:
- print('Ctrl+c received, terminating and joining pool.')
+ print('Ctrl+c received, terminating and joining pool.') # noqa: T201
try:
pool.shutdown()
- except Exception:
+ except Exception: # noqa: BLE001
sys.exit()
tmp = time.time()
- print('=====================================')
- Nsim = len(list(result_objs))
- Y = np.zeros((Nsim, y_dim))
+ print('=====================================') # noqa: T201
+ Nsim = len(list(result_objs)) # noqa: N806
+ Y = np.zeros((Nsim, y_dim)) # noqa: N806
- for val, id in result_objs:
+ for val, id in result_objs: # noqa: A001
if np.isnan(np.sum(val)):
- Nsim = id - id_sim
- X = X[:Nsim, :]
- Y = Y[:Nsim, :]
+ Nsim = id - id_sim # noqa: N806
+ X = X[:Nsim, :] # noqa: N806
+ Y = Y[:Nsim, :] # noqa: N806
else:
Y[id - id_sim, :] = val
return X, Y, id_sim + Nsim
-def read_txt(text_dir, errlog):
- if not os.path.exists(text_dir):
+def read_txt(text_dir, errlog): # noqa: D103
+ if not os.path.exists(text_dir): # noqa: PTH110
msg = 'Error: file does not exist: ' + text_dir
errlog.exit(msg)
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123
# Iterate through the file until the table starts
header_count = 0
for line in f:
if line.startswith('%'):
header_count = header_count + 1
- print(line)
+ print(line) # noqa: T201
# X = np.loadtxt(f, skiprows=header_count, delimiter=',')
try:
- with open(text_dir) as f:
- X = np.loadtxt(f, skiprows=header_count)
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
+ X = np.loadtxt(f, skiprows=header_count) # noqa: N806
except ValueError:
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
try:
- X = np.genfromtxt(f, skip_header=header_count, delimiter=',')
+ X = np.genfromtxt(f, skip_header=header_count, delimiter=',') # noqa: N806
# if there is an extra trailing delimiter, drop the resulting NaN column
if np.isnan(X[-1, -1]):
- X = np.delete(X, -1, 1)
+ X = np.delete(X, -1, 1) # noqa: N806
# X = np.loadtxt(f, skiprows=header_count, delimiter=',')
except ValueError:
msg = 'Error: file format is not supported ' + text_dir
errlog.exit(msg)
if X.ndim == 1:
- X = np.array([X]).transpose()
+ X = np.array([X]).transpose() # noqa: N806
return X
-def closest_node(node, nodes, ll):
+def closest_node(node, nodes, ll): # noqa: D103
nodes = np.asarray(nodes)
deltas = nodes - node
deltas_norm = np.zeros(deltas.shape)
@@ -2324,14 +2324,14 @@ def closest_node(node, nodes, ll):
return np.argmin(dist_2)
-def imse(m_tmp, xcandi, xq, phiqr, i):
- X = m_tmp.X
- Y = m_tmp.Y
- X_tmp = np.vstack([X, xcandi])
- Y_tmp = np.zeros((Y.shape[0] + 1, Y.shape[1])) # any variables
+def imse(m_tmp, xcandi, xq, phiqr, i): # noqa: D103
+ X = m_tmp.X # noqa: N806
+ Y = m_tmp.Y # noqa: N806
+ X_tmp = np.vstack([X, xcandi]) # noqa: N806
+ Y_tmp = np.zeros((Y.shape[0] + 1, Y.shape[1])) # any variables # noqa: N806
m_tmp.set_XY(X=X_tmp, Y=Y_tmp)
- dummy, Yq_var = m_tmp.predict(xq)
- IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten())
+ dummy, Yq_var = m_tmp.predict(xq) # noqa: N806
+ IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten()) # noqa: N806
return IMSEc1, i
@@ -2339,26 +2339,26 @@ def imse(m_tmp, xcandi, xq, phiqr, i):
# ==========================================================================================
-class errorLog:
+class errorLog: # noqa: D101
def __init__(self, work_dir):
- self.file = open(f'{work_dir}/dakota.err', 'w')
+ self.file = open(f'{work_dir}/dakota.err', 'w') # noqa: SIM115, PTH123
- def exit(self, msg):
- print(msg)
+ def exit(self, msg): # noqa: D102
+ print(msg) # noqa: T201
self.file.write(msg)
self.file.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
- def terminate(self):
+ def terminate(self): # noqa: D102
self.file.close()
-def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
+def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type): # noqa: N803, D103
# t_total = time.process_time()
filename = 'SimGpModel'
- print('FILE: ' + work_dir + '/templatedir/' + inputFile)
- f = open(work_dir + '/templatedir/' + inputFile)
+ print('FILE: ' + work_dir + '/templatedir/' + inputFile) # noqa: T201
+ f = open(work_dir + '/templatedir/' + inputFile) # noqa: SIM115, PTH123
try:
inp = json.load(f)
except ValueError:
@@ -2389,13 +2389,13 @@ def build_surrogate(work_dir, inputFile, workflowDriver, os_type, run_type):
# the actual execution
if __name__ == '__main__':
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816
work_dir = inputArgs[1].replace(os.sep, '/')
errlog = errorLog(work_dir)
- inputFile = inputArgs[2]
- workflowDriver = inputArgs[3]
+ inputFile = inputArgs[2] # noqa: N816
+ workflowDriver = inputArgs[3] # noqa: N816
os_type = inputArgs[4]
run_type = inputArgs[5]
diff --git a/modules/performUQ/SimCenterUQ/runPLoM.py b/modules/performUQ/SimCenterUQ/runPLoM.py
index 881017162..08f3ac35f 100644
--- a/modules/performUQ/SimCenterUQ/runPLoM.py
+++ b/modules/performUQ/SimCenterUQ/runPLoM.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2021 Leland Stanford Junior University
# Copyright (c) 2021 The Regents of the University of California
#
@@ -49,7 +49,7 @@
import numpy as np
import pandas as pd
-from PLoM.PLoM import *
+from PLoM.PLoM import * # noqa: F403
# ==========================================================================================
@@ -64,7 +64,7 @@ class runPLoM:
_load_variables: load training data
train_model: model training
save_model: model saving
- """
+ """ # noqa: D205, D400
def __init__(
self,
@@ -83,7 +83,7 @@ def __init__(
os_type: operating system type
job_config: configuration (dtype = dict)
errlog: error log object
- """
+ """ # noqa: D205, D400
# read inputs
self.work_dir = work_dir
self.run_type = run_type
@@ -94,8 +94,8 @@ def __init__(
self.workflow_driver = workflow_driver
# initialization
- self.rv_name = list()
- self.g_name = list()
+ self.rv_name = list() # noqa: C408
+ self.g_name = list() # noqa: C408
self.x_dim = 0
self.y_dim = 0
@@ -103,7 +103,7 @@ def __init__(
# self.x_dim, self.y_dim, self.rv_name, self.g_name = self._create_variables(job_config)
# read PLoM parameters
- surrogateInfo = job_config['UQ']['surrogateMethodInfo']
+ surrogateInfo = job_config['UQ']['surrogateMethodInfo'] # noqa: N806
if self._parse_plom_parameters(surrogateInfo):
msg = 'runPLoM.__init__: Error in reading PLoM parameters.'
self.errlog.exit(msg)
@@ -123,10 +123,10 @@ def __init__(
do_sampling = False
do_simulation = not surrogateInfo['outputData']
self.doe_method = 'None' # default
- do_doe = False
- self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in')
+ do_doe = False # noqa: F841
+ self.inpData = os.path.join(work_dir, 'templatedir/inpFile.in') # noqa: PTH118
if not do_simulation:
- self.outData = os.path.join(work_dir, 'templatedir/outFile.in')
+ self.outData = os.path.join(work_dir, 'templatedir/outFile.in') # noqa: PTH118
self._create_variables_from_input()
elif surrogateInfo['method'] == 'Sampling and Simulation':
# run simulation first to generate training data
@@ -145,41 +145,41 @@ def __init__(
msg = 'runPLoM.__init__: Error in loading variables.'
self.errlog.exit(msg)
- def _run_simulation(self):
+ def _run_simulation(self): # noqa: C901
"""_run_simulation: running simulation to get training data
input:
job_config: job configuration dictionary
output:
None
- """
+ """ # noqa: D205, D400
import platform
job_config = self.job_config
# get python instance
- runType = job_config.get('runType', 'runningLocal')
+ runType = job_config.get('runType', 'runningLocal') # noqa: N806
if (
sys.platform == 'darwin'
or sys.platform == 'linux'
or sys.platform == 'linux2'
):
- pythonEXE = 'python3'
+ pythonEXE = 'python3' # noqa: N806
else:
- pythonEXE = 'python'
+ pythonEXE = 'python' # noqa: N806
if runType == 'runningLocal' and platform.system() == 'Windows':
- localAppDir = job_config.get('localAppDir', None)
+ localAppDir = job_config.get('localAppDir', None) # noqa: N806
if localAppDir is None:
# no local app directory is found, let's try to use system python
pass
else:
# pythonEXE = os.path.join(localAppDir,'applications','python','python.exe')
- pythonEXE = '"' + sys.executable + '"'
+ pythonEXE = '"' + sys.executable + '"' # noqa: N806
else:
# for remote run and macOS, let's use system python
pass
# move into the templatedir
- run_dir = job_config.get('runDir', os.getcwd())
+ run_dir = job_config.get('runDir', os.getcwd()) # noqa: PTH109
os.chdir(run_dir)
# training is done for single building (for now)
bldg_id = None
@@ -188,60 +188,60 @@ def _run_simulation(self):
os.chdir('templatedir')
# dakota script path
- dakotaScript = os.path.join(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
+ dakotaScript = os.path.join( # noqa: PTH118, N806
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # noqa: PTH100, PTH120
'dakota',
'DakotaUQ.py',
)
- print('dakotaScript = ', dakotaScript)
+ print('dakotaScript = ', dakotaScript) # noqa: T201
# write a new dakota.json for forward propagation
# KZ modified 0331
- with open(self.input_file, encoding='utf-8') as f:
+ with open(self.input_file, encoding='utf-8') as f: # noqa: PTH123
tmp = json.load(f)
tmp['UQ']['uqType'] = 'Forward Propagation'
tmp['UQ']['parallelExecution'] = True
- samplingObj = tmp['UQ']['surrogateMethodInfo']['samplingMethod']
- tmp['UQ']['samplingMethodData'] = dict()
+ samplingObj = tmp['UQ']['surrogateMethodInfo']['samplingMethod'] # noqa: N806
+ tmp['UQ']['samplingMethodData'] = dict() # noqa: C408
# KZ modified 0331
tmp['UQ']['uqEngine'] = 'Dakota'
tmp['Applications']['UQ']['Application'] = 'Dakota-UQ'
for key, item in samplingObj.items():
tmp['UQ']['samplingMethodData'][key] = item
- with open('sc_dakota_plom.json', 'w', encoding='utf-8') as f:
+ with open('sc_dakota_plom.json', 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(tmp, f, indent=2)
# command line
# KZ modified 0331
- command_line = f'{pythonEXE} {dakotaScript} --workflowInput sc_dakota_plom.json --driverFile {os.path.splitext(self.workflow_driver)[0]} --workflowOutput EDP.json --runType {runType}'
- print(command_line)
+ command_line = f'{pythonEXE} {dakotaScript} --workflowInput sc_dakota_plom.json --driverFile {os.path.splitext(self.workflow_driver)[0]} --workflowOutput EDP.json --runType {runType}' # noqa: PTH122
+ print(command_line) # noqa: T201
# run command
- dakotaTabPath = os.path.join(self.work_dir, 'dakotaTab.out')
- print(dakotaTabPath)
+ dakotaTabPath = os.path.join(self.work_dir, 'dakotaTab.out') # noqa: PTH118, N806
+ print(dakotaTabPath) # noqa: T201
try:
- os.system(command_line)
- except:
- print(
+ os.system(command_line) # noqa: S605
+ except: # noqa: E722
+ print( # noqa: T201
'runPLoM._run_simulation: error in running dakota to generate the initial sample.'
)
- print(
+ print( # noqa: T201
                'runPLoM._run_simulation: please check that Dakota is installed correctly on the system.'
)
- if not os.path.exists(dakotaTabPath):
+ if not os.path.exists(dakotaTabPath): # noqa: PTH110
try:
- subprocess.call(command_line)
- except:
- print(
+ subprocess.call(command_line) # noqa: S603
+ except: # noqa: E722
+ print( # noqa: T201
'runPLoM._run_simulation: error in running dakota to generate the initial sample.'
)
- print(
+ print( # noqa: T201
                'runPLoM._run_simulation: please check that Dakota is installed correctly on the system.'
)
- if not os.path.exists(dakotaTabPath):
+ if not os.path.exists(dakotaTabPath): # noqa: PTH110
msg = 'Dakota preprocessor did not run successfully'
self.errlog.exit(msg)
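
The os.system call on a shell string (S605) and the bare subprocess.call fallback can both be expressed with subprocess.run on an argument list, which avoids shell quoting and raises on a nonzero exit status. A hedged sketch mirroring the command line built above:

import subprocess

def run_dakota_workflow(python_exe, dakota_script, run_type, driver):
    cmd = [python_exe, dakota_script,
           '--workflowInput', 'sc_dakota_plom.json',
           '--driverFile', driver,
           '--workflowOutput', 'EDP.json',
           '--runType', run_type]
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure
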
@@ -264,7 +264,7 @@ def _run_simulation(self):
self.inpData, self.outData = self._prepare_training_data(run_dir)
# update job_config['randomVariables']
cur_rv_list = [x.get('name') for x in job_config['randomVariables']]
- for curRV in self.rv_name:
+ for curRV in self.rv_name: # noqa: N806
if curRV not in cur_rv_list:
job_config['randomVariables'].append(
{'distribution': 'Normal', 'name': curRV}
@@ -274,20 +274,20 @@ def _run_simulation(self):
elif self.run_type in ['set_up', 'runningRemote']:
pass
- def _prepare_training_data(self, run_dir):
+ def _prepare_training_data(self, run_dir): # noqa: C901
# load IM.csv if exists
- df_IM = pd.DataFrame()
- if os.path.exists(os.path.join(run_dir, 'IM.csv')):
- df_IM = pd.read_csv(os.path.join(run_dir, 'IM.csv'), index_col=None)
+ df_IM = pd.DataFrame() # noqa: N806
+ if os.path.exists(os.path.join(run_dir, 'IM.csv')): # noqa: PTH110, PTH118
+ df_IM = pd.read_csv(os.path.join(run_dir, 'IM.csv'), index_col=None) # noqa: PTH118, N806
else:
msg = f'runPLoM._prepare_training_data: no IM.csv in {run_dir}.'
- print(msg)
+ print(msg) # noqa: T201
# load response.csv if exists
- df_SIMU = pd.DataFrame()
- if os.path.exists(os.path.join(run_dir, 'response.csv')):
- df_SIMU = pd.read_csv(
- os.path.join(run_dir, 'response.csv'),
+ df_SIMU = pd.DataFrame() # noqa: N806
+ if os.path.exists(os.path.join(run_dir, 'response.csv')): # noqa: PTH110, PTH118
+ df_SIMU = pd.read_csv( # noqa: N806
+ os.path.join(run_dir, 'response.csv'), # noqa: PTH118
index_col=None,
)
else:
@@ -296,21 +296,21 @@ def _prepare_training_data(self, run_dir):
# read BIM to get RV names
# KZ modified 0331
- with open(
- os.path.join(run_dir, 'templatedir', self.input_file),
+ with open( # noqa: PTH123
+ os.path.join(run_dir, 'templatedir', self.input_file), # noqa: PTH118
encoding='utf-8',
) as f:
tmp = json.load(f)
- rVs = tmp.get('randomVariables', None)
+ rVs = tmp.get('randomVariables', None) # noqa: N806
if rVs is None:
rv_names = []
else:
rv_names = [x.get('name') for x in rVs]
# collect rv columns from df_SIMU
- df_RV = pd.DataFrame()
+ df_RV = pd.DataFrame() # noqa: N806
if len(rv_names) > 0:
- df_RV = df_SIMU[rv_names]
+ df_RV = df_SIMU[rv_names] # noqa: N806
for cur_rv in rv_names:
df_SIMU.pop(cur_rv)
if '%eval_id' in list(df_SIMU.columns):
@@ -324,9 +324,9 @@ def _prepare_training_data(self, run_dir):
# concat df_RV and df_IM
if not df_IM.empty:
- df_X = pd.concat([df_IM, df_RV], axis=1)
+ df_X = pd.concat([df_IM, df_RV], axis=1) # noqa: N806
else:
- df_X = df_RV
+ df_X = df_RV # noqa: N806
if not df_X.empty and '%eval_id' in list(df_X.columns):
df_X.pop('%eval_id')
if not df_X.empty and '%MultipleEvent' in list(df_X.columns):
@@ -336,18 +336,18 @@ def _prepare_training_data(self, run_dir):
# make the first column name start with %
if not df_X.empty:
- df_X = df_X.rename(
- {list(df_X.columns)[0]: '%' + list(df_X.columns)[0]},
+ df_X = df_X.rename( # noqa: N806
+ {list(df_X.columns)[0]: '%' + list(df_X.columns)[0]}, # noqa: RUF015
axis='columns',
)
- df_SIMU = df_SIMU.rename(
- {list(df_SIMU.columns)[0]: '%' + list(df_SIMU.columns)[0]},
+ df_SIMU = df_SIMU.rename( # noqa: N806
+ {list(df_SIMU.columns)[0]: '%' + list(df_SIMU.columns)[0]}, # noqa: RUF015
axis='columns',
)
# save to csvs
- inpData = os.path.join(run_dir, 'PLoM_variables.csv')
- outData = os.path.join(run_dir, 'PLoM_responses.csv')
+ inpData = os.path.join(run_dir, 'PLoM_variables.csv') # noqa: PTH118, N806
+ outData = os.path.join(run_dir, 'PLoM_responses.csv') # noqa: PTH118, N806
df_X.to_csv(inpData, index=False)
df_SIMU.to_csv(outData, index=False)
@@ -359,14 +359,14 @@ def _prepare_training_data(self, run_dir):
return inpData, outData
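
The table assembly above reduces to: concatenate the IM and RV columns, drop Dakota bookkeeping columns, and prefix the first header with '%' so the CSV matches the tab-file convention. A compact sketch:

import pandas as pd

def assemble_inputs(df_im, df_rv):
    df_x = df_rv if df_im.empty else pd.concat([df_im, df_rv], axis=1)
    for col in ('%eval_id', '%MultipleEvent'):
        if col in df_x.columns:
            df_x = df_x.drop(columns=col)
    if not df_x.empty:
        first = df_x.columns[0]
        df_x = df_x.rename(columns={first: '%' + first})
    return df_x
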
- def _compute_IM(self, run_dir, pythonEXE):
+ def _compute_IM(self, run_dir, pythonEXE): # noqa: N802, N803
# find workdirs
workdir_list = [x for x in os.listdir(run_dir) if x.startswith('workdir')]
# intensity measure app
- computeIM = os.path.join(
- os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ computeIM = os.path.join( # noqa: PTH118, N806
+ os.path.dirname( # noqa: PTH120
+ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # noqa: PTH100, PTH120
),
'createEVENT',
'groundMotionIM',
@@ -376,8 +376,8 @@ def _compute_IM(self, run_dir, pythonEXE):
# compute IMs
for cur_workdir in workdir_list:
os.chdir(cur_workdir)
- if os.path.exists('EVENT.json') and os.path.exists('AIM.json'):
- os.system(
+ if os.path.exists('EVENT.json') and os.path.exists('AIM.json'): # noqa: PTH110
+ os.system( # noqa: S605
f'{pythonEXE} {computeIM} --filenameAIM AIM.json --filenameEVENT EVENT.json --filenameIM IM.json'
)
os.chdir(run_dir)
@@ -385,13 +385,13 @@ def _compute_IM(self, run_dir, pythonEXE):
# collect IMs from different workdirs
for i, cur_workdir in enumerate(workdir_list):
cur_id = int(cur_workdir.split('.')[-1])
- if os.path.exists(os.path.join(cur_workdir, 'IM.csv')):
+ if os.path.exists(os.path.join(cur_workdir, 'IM.csv')): # noqa: PTH110, PTH118
try:
tmp1 = pd.read_csv(
- os.path.join(cur_workdir, 'IM.csv'),
+ os.path.join(cur_workdir, 'IM.csv'), # noqa: PTH118
index_col=None,
)
- except:
+ except: # noqa: E722
return
if tmp1.empty:
return
@@ -417,7 +417,7 @@ def _create_variables(self, training_data):
y_dim: dimension of Y data
rv_name: random variable name (X data)
g_name: variable name (Y data)
- """
+ """ # noqa: D205, D400
job_config = self.job_config
# initialization
@@ -432,18 +432,18 @@ def _create_variables(self, training_data):
# read X and Y variable names
for rv in job_config['randomVariables']:
- rv_name = rv_name + [rv['name']]
+ rv_name = rv_name + [rv['name']] # noqa: RUF005
x_dim += 1
if x_dim == 0:
msg = 'Error reading json: RV is empty'
self.errlog.exit(msg)
for g in job_config['EDP']:
if g['length'] == 1: # scalar
- g_name = g_name + [g['name']]
+ g_name = g_name + [g['name']] # noqa: RUF005
y_dim += 1
else: # vector
for nl in range(g['length']):
- g_name = g_name + ['{}_{}'.format(g['name'], nl + 1)]
+ g_name = g_name + ['{}_{}'.format(g['name'], nl + 1)] # noqa: RUF005
y_dim += 1
if y_dim == 0:
msg = 'Error reading json: EDP(QoI) is empty'
@@ -471,13 +471,13 @@ def _create_variables_from_input(self):
else:
self.multipleEvent = None
- def _parse_plom_parameters(self, surrogateInfo):
+ def _parse_plom_parameters(self, surrogateInfo): # noqa: C901, N803
"""_parse_plom_parameters: parse PLoM parameters from surrogateInfo
input:
surrogateInfo: surrogate information dictionary
output:
run_flag: 0 - success, 1: failure
- """
+ """ # noqa: D205, D400
run_flag = 0
try:
self.n_mc = int(surrogateInfo['newSampleRatio'])
@@ -495,10 +495,10 @@ def _parse_plom_parameters(self, surrogateInfo):
if self.smootherKDE_file and self.smootherKDE_dir:
# KZ, 07/24: both file and file path received
# Note that the file is saved by the frontend to the work_dir -> overwrite self.smootherKDE_file
- self.smootherKDE_file = os.path.join(
+ self.smootherKDE_file = os.path.join( # noqa: PTH118
work_dir, 'templatedir', self.smootherKDE_file
)
- if not os.path.isfile(self.smootherKDE_file):
+ if not os.path.isfile(self.smootherKDE_file): # noqa: PTH113
# not found the file
msg = f'Error finding user-defined function file for KDE: {self.smootherKDE_file}.'
errlog.exit(msg)
@@ -525,10 +525,10 @@ def _parse_plom_parameters(self, surrogateInfo):
if self.kdeTolerance_file and self.kdeTolerance_dir:
# KZ, 07/24: both file and file path received
# Note that the file is saved by the frontend to the work_dir -> overwrite self.kdeTolerance_file
- self.kdeTolerance_file = os.path.join(
+ self.kdeTolerance_file = os.path.join( # noqa: PTH118
work_dir, 'templatedir', self.kdeTolerance_file
)
- if not os.path.isfile(self.kdeTolerance_file):
+ if not os.path.isfile(self.kdeTolerance_file): # noqa: PTH113
# not found the file
msg = f'Error finding user-defined function file for KDE: {self.kdeTolerance_file}.'
errlog.exit(msg)
@@ -542,14 +542,14 @@ def _parse_plom_parameters(self, surrogateInfo):
self.kdeTolerance = surrogateInfo.get('kdeTolerance', 0.1)
# self.kdeTolerance = surrogateInfo.get("kdeTolerance",0.1)
if self.constraintsFlag:
- self.constraintsFile = os.path.join(
+ self.constraintsFile = os.path.join( # noqa: PTH118
work_dir, 'templatedir/plomConstraints.py'
)
self.numIter = surrogateInfo.get('numIter', 50)
self.tolIter = surrogateInfo.get('tolIter', 0.02)
self.preTrained = surrogateInfo.get('preTrained', False)
if self.preTrained:
- self.preTrainedModel = os.path.join(
+ self.preTrainedModel = os.path.join( # noqa: PTH118
work_dir, 'templatedir/surrogatePLoM.h5'
)
@@ -558,7 +558,7 @@ def _parse_plom_parameters(self, surrogateInfo):
msg = 'runPLoM._parse_plom_parameters: Error in loading hyperparameter functions.'
self.errlog.exit(msg)
- except:
+ except: # noqa: E722
run_flag = 1
# return
@@ -570,7 +570,7 @@ def _set_up_parallel(self):
none
output:
run_flag: 0 - success, 1 - failure
- """
+ """ # noqa: D205, D400
run_flag = 0
try:
if self.run_type.lower() == 'runninglocal':
@@ -585,16 +585,16 @@ def _set_up_parallel(self):
self.world = MPI.COMM_WORLD
self.pool = MPIPoolExecutor()
self.n_processor = self.world.Get_size()
- print('nprocessor :')
- print(self.n_processor)
+ print('nprocessor :') # noqa: T201
+ print(self.n_processor) # noqa: T201
self.cal_interval = self.n_processor
- except:
+ except: # noqa: E722
run_flag = 1
# return
return run_flag
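
_set_up_parallel picks multiprocessing for local runs and mpi4py for remote ones. A minimal sketch of that branch, assuming mpi4py is available on the remote system as the original does:

def make_pool(run_type):
    if run_type.lower() == 'runninglocal':
        from multiprocessing import Pool, cpu_count
        n = cpu_count()
        return Pool(n), n
    from mpi4py import MPI
    from mpi4py.futures import MPIPoolExecutor
    n = MPI.COMM_WORLD.Get_size()
    return MPIPoolExecutor(), n
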
- def _load_variables(self, do_sampling, do_simulation):
+ def _load_variables(self, do_sampling, do_simulation): # noqa: C901
"""_load_variables: load variables
input:
do_sampling: sampling flag
@@ -602,7 +602,7 @@ def _load_variables(self, do_sampling, do_simulation):
job_config: job configuration dictionary
output:
run_flag: 0 - success, 1 - failure
- """
+ """ # noqa: D205, D400
job_config = self.job_config
run_flag = 0
@@ -610,35 +610,35 @@ def _load_variables(self, do_sampling, do_simulation):
if do_sampling:
pass
else:
- X = read_txt(self.inpData, self.errlog)
- print('X = ', X)
- print(X.columns)
+ X = read_txt(self.inpData, self.errlog) # noqa: N806
+ print('X = ', X) # noqa: T201
+ print(X.columns) # noqa: T201
if len(X.columns) != self.x_dim:
            msg = f'Error importing input data: Number of dimensions inconsistent: have {self.x_dim} RV(s) but {len(X.columns)} column(s).'
errlog.exit(msg)
if self.logTransform:
- X = np.log(X)
+ X = np.log(X) # noqa: N806
if do_simulation:
pass
else:
- Y = read_txt(self.outData, self.errlog)
+ Y = read_txt(self.outData, self.errlog) # noqa: N806
if Y.shape[1] != self.y_dim:
                msg = f'Error importing input data: Number of dimensions inconsistent: have {self.y_dim} QoI(s) but {len(Y.columns)} column(s).'
errlog.exit(msg)
if self.logTransform:
- Y = np.log(Y)
+ Y = np.log(Y) # noqa: N806
if X.shape[0] != Y.shape[0]:
            msg = f'Warning importing input data: numbers of samples of inputs ({X.shape[0]}) and outputs ({Y.shape[0]}) are inconsistent'
- print(msg)
+ print(msg) # noqa: T201
n_samp = Y.shape[0]
# writing a data file for PLoM input
self.X = X.to_numpy()
self.Y = Y.to_numpy()
- inputXY = os.path.join(work_dir, 'templatedir/inputXY.csv')
- X_Y = pd.concat([X, Y], axis=1)
+ inputXY = os.path.join(work_dir, 'templatedir/inputXY.csv') # noqa: PTH118, N806
+ X_Y = pd.concat([X, Y], axis=1) # noqa: N806
X_Y.to_csv(inputXY, sep=',', header=True, index=False)
self.inputXY = inputXY
self.n_samp = n_samp
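
The staging logic above checks declared dimensions, optionally log-transforms, and writes a combined X|Y table for PLoM. A condensed sketch operating on the DataFrames returned by read_txt (output path illustrative):

import numpy as np
import pandas as pd

def stage_training_data(X, Y, x_dim, y_dim, log_transform, out_csv):
    assert X.shape[1] == x_dim and Y.shape[1] == y_dim, 'dimension mismatch'
    if log_transform:
        X, Y = np.log(X), np.log(Y)
    pd.concat([X, Y], axis=1).to_csv(out_csv, sep=',', header=True, index=False)
    return X.to_numpy(), Y.to_numpy()
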
@@ -650,18 +650,18 @@ def _load_variables(self, do_sampling, do_simulation):
self.rvVal = []
try:
for nx in range(self.x_dim):
- rvInfo = job_config['randomVariables'][nx]
- self.rvName = self.rvName + [rvInfo['name']]
- self.rvDist = self.rvDist + [rvInfo['distribution']]
+ rvInfo = job_config['randomVariables'][nx] # noqa: N806
+ self.rvName = self.rvName + [rvInfo['name']] # noqa: RUF005
+ self.rvDist = self.rvDist + [rvInfo['distribution']] # noqa: RUF005
if do_sampling:
- self.rvVal = self.rvVal + [
+ self.rvVal = self.rvVal + [ # noqa: RUF005
(rvInfo['upperbound'] + rvInfo['lowerbound']) / 2
]
else:
- self.rvVal = self.rvVal + [np.mean(self.X[:, nx])]
- except:
+ self.rvVal = self.rvVal + [np.mean(self.X[:, nx])] # noqa: RUF005
+ except: # noqa: E722
msg = 'Warning: randomVariables attributes in configuration file are not consistent with x_dim'
- print(msg)
+ print(msg) # noqa: T201
# except:
# run_flag = 1
@@ -673,43 +673,43 @@ def _load_hyperparameter(self):
run_flag = 0
try:
# load constraints first
- constr_file = Path(self.constraintsFile).resolve()
+ constr_file = Path(self.constraintsFile).resolve() # noqa: F405
sys.path.insert(0, str(constr_file.parent) + '/')
- constr_script = importlib.__import__(
+ constr_script = importlib.__import__( # noqa: F405
constr_file.name[:-3], globals(), locals(), [], 0
)
self.beta_c = constr_script.beta_c()
- print('beta_c = ', self.beta_c)
+ print('beta_c = ', self.beta_c) # noqa: T201
# if smootherKDE
if self.smootherKDE_Customize:
- kde_file = Path(self.smootherKDE_file).resolve()
+ kde_file = Path(self.smootherKDE_file).resolve() # noqa: F405
sys.path.insert(0, str(kde_file.parent) + '/')
- kde_script = importlib.__import__(
+ kde_script = importlib.__import__( # noqa: F405
kde_file.name[:-3], globals(), locals(), [], 0
)
self.get_epsilon_k = kde_script.get_epsilon_k
# evaluating the function
self.smootherKDE = self.get_epsilon_k(self.beta_c)
- print('epsilon_k = ', self.smootherKDE)
+ print('epsilon_k = ', self.smootherKDE) # noqa: T201
# if tolKDE
if self.kdeTolerance_Customize:
- beta_file = Path(self.kdeTolerance_file).resolve()
+ beta_file = Path(self.kdeTolerance_file).resolve() # noqa: F405
sys.path.insert(0, str(beta_file.parent) + '/')
- beta_script = importlib.__import__(
+ beta_script = importlib.__import__( # noqa: F405
beta_file.name[:-3], globals(), locals(), [], 0
)
self.get_epsilon_db = beta_script.get_epsilon_db
# evaluating the function
self.kdeTolerance = self.get_epsilon_db(self.beta_c)
- print('epsilon_db = ', self.kdeTolerance)
- except:
+ print('epsilon_db = ', self.kdeTolerance) # noqa: T201
+ except: # noqa: E722
run_flag = 1
return run_flag
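
The dynamic imports above (sys.path insertion plus importlib.__import__, hence the F405 suppressions) can also be written with importlib.util, which loads a module straight from its file path without touching sys.path. A sketch of that alternative, not the code's current behavior:

import importlib.util
from pathlib import Path

def load_user_function(py_file, func_name):
    path = Path(py_file).resolve()
    spec = importlib.util.spec_from_file_location(path.stem, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, func_name)
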
- def train_model(self, model_name='SurrogatePLoM'):
- db_path = os.path.join(self.work_dir, 'templatedir')
+ def train_model(self, model_name='SurrogatePLoM'): # noqa: D102
+ db_path = os.path.join(self.work_dir, 'templatedir') # noqa: PTH118
if not self.preTrained:
- self.modelPLoM = PLoM(
+ self.modelPLoM = PLoM( # noqa: F405
model_name=model_name,
data=self.inputXY,
separator=',',
@@ -721,7 +721,7 @@ def train_model(self, model_name='SurrogatePLoM'):
plot_tag=True,
)
else:
- self.modelPLoM = PLoM(
+ self.modelPLoM = PLoM( # noqa: F405
model_name=model_name,
data=self.preTrainedModel,
db_path=db_path,
@@ -756,17 +756,17 @@ def train_model(self, model_name='SurrogatePLoM'):
if self.constraintsFlag:
self.Errors = self.modelPLoM.errors
- def save_model(self):
+ def save_model(self): # noqa: C901, D102
# copy the h5 model file to the main work dir
shutil.copy2(
- os.path.join(
+ os.path.join( # noqa: PTH118
self.work_dir, 'templatedir', 'SurrogatePLoM', 'SurrogatePLoM.h5'
),
self.work_dir,
)
if self.n_mc > 0:
shutil.copy2(
- os.path.join(
+ os.path.join( # noqa: PTH118
self.work_dir,
'templatedir',
'SurrogatePLoM',
@@ -793,11 +793,11 @@ def save_model(self):
# np.savetxt(self.work_dir + '/dakotaTab.out', xy_data, header=header_string, fmt='%1.4e', comments='%')
# np.savetxt(self.work_dir + '/inputTab.out', self.X, header=header_string_x[1:-1], fmt='%1.4e', comments='%')
# np.savetxt(self.work_dir + '/outputTab.out', self.Y, header=header_string_y[1:], fmt='%1.4e', comments='%')
- df_inputTab = pd.DataFrame(data=self.X, columns=self.rv_name)
- df_outputTab = pd.DataFrame(data=self.Y, columns=self.g_name)
- df_inputTab.to_csv(os.path.join(self.work_dir, 'inputTab.out'), index=False)
+ df_inputTab = pd.DataFrame(data=self.X, columns=self.rv_name) # noqa: N806
+ df_outputTab = pd.DataFrame(data=self.Y, columns=self.g_name) # noqa: N806
+ df_inputTab.to_csv(os.path.join(self.work_dir, 'inputTab.out'), index=False) # noqa: PTH118
df_outputTab.to_csv(
- os.path.join(self.work_dir, 'outputTab.out'),
+ os.path.join(self.work_dir, 'outputTab.out'), # noqa: PTH118
index=False,
)
@@ -830,13 +830,13 @@ def save_model(self):
rvs['name'] = self.rvName[nx]
rvs['distribution'] = self.rvDist[nx]
rvs['value'] = self.rvVal[nx]
- rv_list = rv_list + [rvs]
+ rv_list = rv_list + [rvs] # noqa: RUF005
results['randomVariables'] = rv_list
- except:
+ except: # noqa: E722
msg = 'Warning: randomVariables attributes in configuration file are not consistent with x_dim'
- print(msg)
- results['dirPLoM'] = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ print(msg) # noqa: T201
+ results['dirPLoM'] = os.path.join( # noqa: PTH118
+ os.path.dirname(os.path.abspath(__file__)), # noqa: PTH100, PTH120
'PLoM',
)
@@ -848,9 +848,9 @@ def save_model(self):
results['Errors'] = self.Errors
if self.n_mc > 0:
- Xnew = pd.read_csv(self.work_dir + '/X_new.csv', header=0, index_col=0)
+ Xnew = pd.read_csv(self.work_dir + '/X_new.csv', header=0, index_col=0) # noqa: N806
if self.logTransform:
- Xnew = np.exp(Xnew)
+ Xnew = np.exp(Xnew) # noqa: N806
for nx in range(self.x_dim):
results['xPredict'][self.rv_name[nx]] = Xnew.iloc[:, nx].tolist()
@@ -879,13 +879,13 @@ def save_model(self):
# KZ: adding MultipleEvent if any
if self.multipleEvent is not None:
tmp = pd.read_csv(
- os.path.join(self.work_dir, 'dakotaTab.out'),
+ os.path.join(self.work_dir, 'dakotaTab.out'), # noqa: PTH118
index_col=None,
sep=' ',
)
tmp = pd.concat([tmp, self.multipleEvent], axis=1)
tmp.to_csv(
- os.path.join(self.work_dir, 'dakotaTab.out'),
+ os.path.join(self.work_dir, 'dakotaTab.out'), # noqa: PTH118
index=False,
sep=' ',
)
@@ -904,22 +904,22 @@ def save_model(self):
Xnew.insert(0, '%', [x + 1 for x in list(Xnew.index)])
Xnew.to_csv(self.work_dir + '/dakotaTab.out', index=False, sep=' ')
- if os.path.exists('dakota.out'):
- os.remove('dakota.out')
+ if os.path.exists('dakota.out'): # noqa: PTH110
+ os.remove('dakota.out') # noqa: PTH107
- with open('dakota.out', 'w', encoding='utf-8') as fp:
+ with open('dakota.out', 'w', encoding='utf-8') as fp: # noqa: PTH123
json.dump(results, fp, indent=2)
- print('Results Saved')
+ print('Results Saved') # noqa: T201
-def read_txt(text_dir, errlog):
- if not os.path.exists(text_dir):
+def read_txt(text_dir, errlog): # noqa: D103
+ if not os.path.exists(text_dir): # noqa: PTH110
msg = 'Error: file does not exist: ' + text_dir
errlog.exit(msg)
header_line = []
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123
# Iterate through the file until the table starts
header_count = 0
for line in f:
@@ -927,44 +927,44 @@ def read_txt(text_dir, errlog):
header_count = header_count + 1
header_line = line[1:] # remove '%'
try:
- with open(text_dir) as f:
- X = np.loadtxt(f, skiprows=header_count)
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
+ X = np.loadtxt(f, skiprows=header_count) # noqa: N806
except ValueError:
try:
- with open(text_dir) as f:
- X = np.genfromtxt(f, skip_header=header_count, delimiter=',')
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
+ X = np.genfromtxt(f, skip_header=header_count, delimiter=',') # noqa: N806
            # if there are extra delimiters, remove the resulting NaN column
if np.isnan(X[-1, -1]):
- X = np.delete(X, -1, 1)
+ X = np.delete(X, -1, 1) # noqa: N806
except ValueError:
msg = 'Error: file format is not supported ' + text_dir
errlog.exit(msg)
if X.ndim == 1:
- X = np.array([X]).transpose()
+ X = np.array([X]).transpose() # noqa: N806
- print('X = ', X)
+ print('X = ', X) # noqa: T201
# df_X = pd.DataFrame(data=X, columns=["V"+str(x) for x in range(X.shape[1])])
if len(header_line) > 0:
- df_X = pd.DataFrame(data=X, columns=header_line.replace('\n', '').split(','))
+ df_X = pd.DataFrame(data=X, columns=header_line.replace('\n', '').split(',')) # noqa: N806
else:
- df_X = pd.DataFrame()
+ df_X = pd.DataFrame() # noqa: N806
- print('df_X = ', df_X)
+ print('df_X = ', df_X) # noqa: T201
return df_X
-class errorLog:
+class errorLog: # noqa: D101
def __init__(self, work_dir):
- self.file = open(f'{work_dir}/dakota.err', 'w')
+ self.file = open(f'{work_dir}/dakota.err', 'w') # noqa: SIM115, PTH123
- def exit(self, msg):
- print(msg)
+ def exit(self, msg): # noqa: D102
+ print(msg) # noqa: T201
self.file.write(msg)
self.file.close()
- exit(-1)
+ exit(-1) # noqa: PLR1722
def build_surrogate(work_dir, os_type, run_type, input_file, workflow_driver):
@@ -973,12 +973,12 @@ def build_surrogate(work_dir, os_type, run_type, input_file, workflow_driver):
work_dir: working directory
run_type: job type
os_type: operating system type
- """
+ """ # noqa: D205, D400
# t_total = time.process_time()
# default filename
- filename = 'PLoM_Model'
+ filename = 'PLoM_Model' # noqa: F841
# read the configuration file
- f = open(work_dir + '/templatedir/' + input_file)
+ f = open(work_dir + '/templatedir/' + input_file) # noqa: SIM115, PTH123
try:
job_config = json.load(f)
except ValueError:
@@ -1014,10 +1014,10 @@ def build_surrogate(work_dir, os_type, run_type, input_file, workflow_driver):
"""
# collect arguments
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816
# working directory
work_dir = inputArgs[1].replace(os.sep, '/')
- print(f'work_dir = {work_dir}')
+ print(f'work_dir = {work_dir}') # noqa: T201
# print the work_dir
errlog = errorLog(work_dir)
# job type
@@ -1027,8 +1027,8 @@ def build_surrogate(work_dir, os_type, run_type, input_file, workflow_driver):
# default output file: results.out
result_file = 'results.out'
# input file name
- input_file = os.path.basename(inputArgs[2])
- print(f'input_file = {input_file}')
+ input_file = os.path.basename(inputArgs[2]) # noqa: PTH119
+ print(f'input_file = {input_file}') # noqa: T201
# workflowDriver
workflow_driver = inputArgs[3]
# start build the surrogate
diff --git a/modules/performUQ/SimCenterUQ/surrogateBuild.py b/modules/performUQ/SimCenterUQ/surrogateBuild.py
index 2f925593f..083f02965 100644
--- a/modules/performUQ/SimCenterUQ/surrogateBuild.py
+++ b/modules/performUQ/SimCenterUQ/surrogateBuild.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2021 Leland Stanford Junior University
# Copyright (c) 2021 The Regents of the University of California
#
@@ -53,42 +53,42 @@
warnings.filterwarnings('ignore')
-file_dir = os.path.dirname(__file__)
+file_dir = os.path.dirname(__file__) # noqa: PTH120
sys.path.append(file_dir)
-from UQengine import UQengine
+from UQengine import UQengine # noqa: E402
# import pip installed modules
try:
- moduleName = 'numpy'
+ moduleName = 'numpy' # noqa: N816
import numpy as np
- moduleName = 'GPy'
- import GPy as GPy
+ moduleName = 'GPy' # noqa: N816
+ import GPy as GPy # noqa: PLC0414
- moduleName = 'scipy'
+ moduleName = 'scipy' # noqa: N816
from scipy.stats import cramervonmises, lognorm, norm, qmc
- moduleName = 'UQengine'
+ moduleName = 'UQengine' # noqa: N816
# from utilities import run_FEM_batch, errorLog
error_tag = False # global variable
-except:
+except: # noqa: E722
error_tag = True
- print('Failed to import module:' + moduleName)
+ print('Failed to import module:' + moduleName) # noqa: T201
-errFileName = 'dakota.err'
-sys.stderr = open(errFileName, 'w')
+errFileName = 'dakota.err' # noqa: N816
+sys.stderr = open(errFileName, 'w') # noqa: SIM115, PTH123
#
# Modify GPy package
#
-if error_tag == False:
+if error_tag == False: # noqa: E712
- def monkeypatch_method(cls):
+ def monkeypatch_method(cls): # noqa: D103
def decorator(func):
setattr(cls, func.__name__, func)
return func
@@ -96,13 +96,13 @@ def decorator(func):
return decorator
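
monkeypatch_method attaches the decorated function to an existing class under the function's own name; the GPy classes are extended this way just below. A self-contained usage check:

def monkeypatch_method(cls):
    def decorator(func):
        setattr(cls, func.__name__, func)
        return func
    return decorator

class Greeter:
    pass

@monkeypatch_method(Greeter)
def hello(self):
    return 'hello'

assert Greeter().hello() == 'hello'
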
@monkeypatch_method(GPy.models.gp_regression.GPRegression)
- def randomize(self, rand_gen=None, *args, **kwargs):
+ def randomize(self, rand_gen=None, *args, **kwargs): # noqa: D103
if rand_gen is None:
rand_gen = np.random.normal
# first take care of all parameters (from N(0,1))
- x = rand_gen(size=self._size_transformed(), *args, **kwargs)
+ x = rand_gen(size=self._size_transformed(), *args, **kwargs) # noqa: B026
updates = self.update_model()
- self.update_model(False) # Switch off the updates
+ self.update_model(False) # Switch off the updates # noqa: FBT003
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible
x = self.param_array.copy()
@@ -121,13 +121,13 @@ def randomize(self, rand_gen=None, *args, **kwargs):
# Main function
-def main(inputArgs):
- gp = surrogate(inputArgs)
+def main(inputArgs): # noqa: N803, D103
+ gp = surrogate(inputArgs) # noqa: F841
-class surrogate(UQengine):
- def __init__(self, inputArgs):
- super(surrogate, self).__init__(inputArgs)
+class surrogate(UQengine): # noqa: D101
+ def __init__(self, inputArgs): # noqa: N803
+ super(surrogate, self).__init__(inputArgs) # noqa: UP008
t_init = time.time()
#
@@ -160,13 +160,13 @@ def __init__(self, inputArgs):
self.save_model('SimGpModel')
- def check_packages(self, error_tag, moduleName):
- if error_tag == True and moduleName == 'GPy':
+ def check_packages(self, error_tag, moduleName): # noqa: N803, D102
+ if error_tag == True and moduleName == 'GPy': # noqa: E712
if self.os_type.lower().startswith('darwin'):
            msg = 'Surrogate modeling module uses the GPy python package, which is facing a version compatibility issue at this moment (01.05.2024). To use the surrogate module, one needs to manually update the GPy version to 1.13. The instructions can be found in the documentation: https://nheri-simcenter.github.io/quoFEM-Documentation/common/user_manual/usage/desktop/SimCenterUQSurrogate.html#lblsimsurrogate'
self.exit(msg)
- if error_tag == True:
+ if error_tag == True: # noqa: E712
if self.os_type.lower().startswith('win'):
msg = (
'Failed to load python module ['
@@ -181,19 +181,19 @@ def check_packages(self, error_tag, moduleName):
)
self.exit(msg)
- def readJson(self):
+ def readJson(self): # noqa: C901, N802, D102, PLR0912, PLR0915
# self.nopt = max([20, self.n_processor])
self.nopt = 1
try:
- jsonPath = self.inputFile # for EEUQ
- if not os.path.isabs(jsonPath):
- jsonPath = (
+ jsonPath = self.inputFile # for EEUQ # noqa: N806
+ if not os.path.isabs(jsonPath): # noqa: PTH117
+ jsonPath = ( # noqa: N806
self.work_dir + '/templatedir/' + self.inputFile
) # for quoFEM
- with open(jsonPath, encoding='utf-8') as f:
- dakotaJson = json.load(f)
+ with open(jsonPath, encoding='utf-8') as f: # noqa: PTH123
+ dakotaJson = json.load(f) # noqa: N806
except ValueError:
msg = 'invalid json format - dakota.json'
@@ -206,7 +206,7 @@ def readJson(self):
)
self.exit(msg)
- surrogateJson = dakotaJson['UQ']['surrogateMethodInfo']
+ surrogateJson = dakotaJson['UQ']['surrogateMethodInfo'] # noqa: N806
if surrogateJson['method'] == 'Sampling and Simulation':
random.seed(surrogateJson['seed'])
@@ -218,20 +218,20 @@ def readJson(self):
#
# EE-UQ
#
- # TODO: multihazards?
+ # TODO: multihazards? # noqa: TD002
self.isEEUQ = False
- if dakotaJson['Applications'].get('Events') != None:
- Evt = dakotaJson['Applications']['Events']
- if Evt[0].get('EventClassification') != None:
+ if dakotaJson['Applications'].get('Events') != None: # noqa: E711
+ Evt = dakotaJson['Applications']['Events'] # noqa: N806
+ if Evt[0].get('EventClassification') != None: # noqa: E711
if Evt[0]['EventClassification'] == 'Earthquake':
self.isEEUQ = True
self.rv_name_ee = []
- if surrogateJson.get('IntensityMeasure') != None and self.isEEUQ:
+ if surrogateJson.get('IntensityMeasure') != None and self.isEEUQ: # noqa: E711
self.intensityMeasure = surrogateJson['IntensityMeasure']
self.intensityMeasure['useGeoMean'] = surrogateJson['useGeoMean']
self.unitInfo = dakotaJson['GeneralInformation']['units']
- for imName, imChar in surrogateJson['IntensityMeasure'].items():
+ for imName, imChar in surrogateJson['IntensityMeasure'].items(): # noqa: B007, N806, PERF102
# if imChar.get("Periods") != None:
# for pers in imChar["Periods"]:
# self.rv_name_ee += [imName+str(pers)]
@@ -248,14 +248,14 @@ def readJson(self):
# common for all surrogate options
#
- self.rv_name = list()
+ self.rv_name = list() # noqa: C408
x_dim = 0
for rv in dakotaJson['randomVariables']:
self.rv_name += [rv['name']]
x_dim += 1
- self.g_name = list()
+ self.g_name = list() # noqa: C408
y_dim = 0
for g in dakotaJson['EDP']:
@@ -282,7 +282,7 @@ def readJson(self):
self.exit(msg)
do_predictive = False
- automate_doe = False
+ automate_doe = False # noqa: F841
self.x_dim = x_dim
self.y_dim = y_dim
@@ -290,7 +290,7 @@ def readJson(self):
try:
self.do_parallel = surrogateJson['parallelExecution']
- except:
+ except: # noqa: E722
self.do_parallel = True
if self.do_parallel:
@@ -300,7 +300,7 @@ def readJson(self):
self.n_processor = 1
self.pool = 0
self.cal_interval = 5
- print(f'self.cal_interval : {self.cal_interval}')
+ print(f'self.cal_interval : {self.cal_interval}') # noqa: T201
#
# Advanced
@@ -315,7 +315,7 @@ def readJson(self):
self.nugget_opt = surrogateJson['nuggetOpt']
# self.heteroscedastic = surrogateJson["Heteroscedastic"]
- if (self.nugget_opt == 'Fixed Values') or (
+ if (self.nugget_opt == 'Fixed Values') or ( # noqa: PLR1714
self.nugget_opt == 'Fixed Bounds'
):
try:
@@ -341,20 +341,20 @@ def readJson(self):
self.stochastic = [False] * y_dim
if self.nugget_opt == 'Fixed Values':
- for Vals in self.nuggetVal:
+ for Vals in self.nuggetVal: # noqa: N806
if not np.isscalar(Vals):
msg = 'Error reading json: provide nugget values of each QoI with comma delimiter'
self.exit(msg)
elif self.nugget_opt == 'Fixed Bounds':
- for Bous in self.nuggetVal:
+ for Bous in self.nuggetVal: # noqa: N806
if np.isscalar(Bous):
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
self.exit(msg)
elif isinstance(Bous, list):
msg = 'Error reading json: provide both lower and upper bounds of nugget'
self.exit(msg)
- elif Bous.shape[0] != 2:
+ elif Bous.shape[0] != 2: # noqa: PLR2004
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
self.exit(msg)
elif Bous[0] > Bous[1]:
@@ -372,20 +372,20 @@ def readJson(self):
if self.stochastic[0]:
@monkeypatch_method(GPy.likelihoods.Gaussian)
- def gaussian_variance(self, Y_metadata=None):
+ def gaussian_variance(self, Y_metadata=None): # noqa: N803
if Y_metadata is None:
return self.variance
- else:
+ else: # noqa: RET505
return self.variance * Y_metadata['variance_structure']
@monkeypatch_method(GPy.core.GP)
- def set_XY2(self, X=None, Y=None, Y_metadata=None):
+ def set_XY2(self, X=None, Y=None, Y_metadata=None): # noqa: N802, N803
if Y_metadata is not None:
if self.Y_metadata is None:
self.Y_metadata = Y_metadata
else:
self.Y_metadata.update(Y_metadata)
- print('metadata_updated')
+ print('metadata_updated') # noqa: T201
self.set_XY(X, Y)
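
Together, gaussian_variance and set_XY2 implement a simple heteroscedastic noise model: one global Gaussian variance scaled per observation by a known structure vector (later built from predicted variances divided by replicate counts). The scaling itself, illustrated with NumPy:

import numpy as np

global_variance = 0.2
variance_structure = np.array([1.0, 0.5, 2.0])  # per-point multipliers
per_point_noise = global_variance * variance_structure  # [0.2, 0.1, 0.4]
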
# Save model information
@@ -437,12 +437,12 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None):
if self.do_mf:
try:
- moduleName = 'emukit'
+ moduleName = 'emukit' # noqa: N806
error_tag = False # global variable
- except:
- error_tag = True
- print('Failed to import module:' + moduleName)
+ except: # noqa: E722
+ error_tag = True # noqa: F841
+ print('Failed to import module:' + moduleName) # noqa: T201
if self.modelInfoHF.is_model:
self.ll = self.modelInfoHF.ll
@@ -491,9 +491,9 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None):
self.rvDiscStr = []
self.rvDiscIdx = []
for nx in range(x_dim):
- rvInfo = dakotaJson['randomVariables'][nx]
- self.rvName = self.rvName + [rvInfo['name']]
- self.rvDist = self.rvDist + [rvInfo['distribution']]
+ rvInfo = dakotaJson['randomVariables'][nx] # noqa: N806
+ self.rvName = self.rvName + [rvInfo['name']] # noqa: RUF005
+ self.rvDist = self.rvDist + [rvInfo['distribution']] # noqa: RUF005
if self.modelInfoHF.is_model:
if rvInfo['distribution'] == 'Uniform':
self.rvVal += [(rvInfo['upperbound'] + rvInfo['lowerbound']) / 2]
@@ -504,13 +504,13 @@ def set_XY2(self, X=None, Y=None, Y_metadata=None):
self.rvDiscIdx = [nx]
elif self.modelInfoHF.is_data:
- self.rvVal = self.rvVal + [
+ self.rvVal = self.rvVal + [ # noqa: RUF005
np.mean(self.modelInfoHF.X_existing[:, nx])
]
else:
self.rvVal = [0] * self.x_dim
- def checkWorkflow(self, dakotaJson):
+ def checkWorkflow(self, dakotaJson): # noqa: N802, N803, D102
if dakotaJson['Applications']['EDP']['Application'] == 'SurrogateEDP':
msg = 'Error in SurrogateGP engine: Do not select [None] in the EDP tab. [None] is used only when using pre-trained surrogate, i.e. when [Surrogate] is selected in the SIM Tab.'
self.exit(msg)
@@ -522,13 +522,13 @@ def checkWorkflow(self, dakotaJson):
msg = 'Error in SurrogateGP engine: Do not select [None] in the FEM tab. [None] is used only when using pre-trained surrogate, i.e. when [Surrogate] is selected in the SIM Tab.'
self.exit(msg)
- maxSampSize = float('Inf')
+ maxSampSize = float('Inf') # noqa: N806
for rv in dakotaJson['randomVariables']:
if rv['distribution'] == 'discrete_design_set_string':
- maxSampSize = len(rv['elements'])
+ maxSampSize = len(rv['elements']) # noqa: N806
if (maxSampSize < dakotaJson['UQ']['surrogateMethodInfo']['samples']) and (
- 'IntensityMeasure' in dakotaJson['UQ']['surrogateMethodInfo'].keys()
+ 'IntensityMeasure' in dakotaJson['UQ']['surrogateMethodInfo'].keys() # noqa: SIM118
):
# if #sample is smaller than #GM & IM is used as input
msg = 'Error in SurrogateGP engine: The number of samples ({}) should NOT be greater than the number of ground motions ({}). Using the same number is highly recommended.'.format(
@@ -536,7 +536,7 @@ def checkWorkflow(self, dakotaJson):
)
self.exit(msg)
- def create_kernel(self, x_dim):
+ def create_kernel(self, x_dim): # noqa: D102
kernel = self.kernel
if kernel == 'Radial Basis':
kr = GPy.kern.RBF(input_dim=x_dim, ARD=True)
@@ -554,11 +554,11 @@ def create_kernel(self, x_dim):
kr = kr + GPy.kern.Linear(input_dim=x_dim, ARD=True)
if self.do_mf:
- kr = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()])
+ kr = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()]) # noqa: F821
return kr
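
create_kernel builds an ARD kernel and optionally adds a linear trend by kernel summation. A trimmed sketch using the same GPy API as this file:

import GPy

def make_kernel(x_dim, name='Radial Basis', add_linear=False):
    if name == 'Radial Basis':
        kr = GPy.kern.RBF(input_dim=x_dim, ARD=True)
    else:
        kr = GPy.kern.Matern52(input_dim=x_dim, ARD=True)
    if add_linear:
        kr = kr + GPy.kern.Linear(input_dim=x_dim, ARD=True)
    return kr
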
- def create_gpy_model(self, X_dummy, Y_dummy, kr):
+ def create_gpy_model(self, X_dummy, Y_dummy, kr): # noqa: N803, D102
if not self.do_mf:
if not self.heteroscedastic:
m_tmp = GPy.models.GPRegression(
@@ -579,13 +579,13 @@ def create_gpy_model(self, X_dummy, Y_dummy, kr):
# for multi fidelity case
else:
- X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays(
+ X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: N806, F821
[X_dummy, X_dummy], [Y_dummy, Y_dummy]
)
- for i in range(y_dim):
- m_tmp = GPyMultiOutputWrapper(
- emf.models.GPyLinearMultiFidelityModel(
+ for i in range(y_dim): # noqa: B007, F821
+ m_tmp = GPyMultiOutputWrapper( # noqa: F821
+ emf.models.GPyLinearMultiFidelityModel( # noqa: F821
X_list, Y_list, kernel=kr.copy(), n_fidelities=2
),
2,
@@ -594,15 +594,15 @@ def create_gpy_model(self, X_dummy, Y_dummy, kr):
return m_tmp
- def create_gp_model(self):
+ def create_gp_model(self): # noqa: D102
x_dim = self.x_dim
y_dim = self.y_dim
# choose kernel
kr = self.create_kernel(x_dim)
- X_dummy = np.zeros((1, x_dim))
- Y_dummy = np.zeros((1, y_dim))
+ X_dummy = np.zeros((1, x_dim)) # noqa: N806
+ Y_dummy = np.zeros((1, y_dim)) # noqa: N806
# for single fidelity case
self.set_normalizer = True
@@ -622,18 +622,18 @@ def create_gp_model(self):
self.x_dim = x_dim
self.y_dim = y_dim
- def predict(self, m_tmp, X, noise=0):
+ def predict(self, m_tmp, X, noise=0): # noqa: ARG002, N803, D102
if not self.do_mf:
if all(np.mean(m_tmp.Y, axis=0) == m_tmp.Y):
return m_tmp.Y[
0
], 0 # if response is constant - just return constant
- elif self.heteroscedastic:
+ elif self.heteroscedastic: # noqa: RET505
return m_tmp.predict_noiseless(X)
else:
return m_tmp.predict_noiseless(X)
else:
- idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0)
+ idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0) # noqa: N806
if all(
np.mean(m_tmp.gpy_model.Y[idxHF, :], axis=0) == m_tmp.gpy_model.Y
):
@@ -641,20 +641,20 @@ def predict(self, m_tmp, X, noise=0):
m_tmp.gpy_model.Y[0],
0,
) # if high-fidelity response is constant - just return constant
- else:
- X_list = convert_x_list_to_array([X, X])
- X_list_h = X_list[X.shape[0] :]
+ else: # noqa: RET505
+ X_list = convert_x_list_to_array([X, X]) # noqa: N806, F821
+ X_list_h = X_list[X.shape[0] :] # noqa: N806
return m_tmp.predict(X_list_h)
- def set_XY(
+ def set_XY( # noqa: C901, N802, D102
self,
m_tmp,
ny,
- X_hf,
- Y_hf,
- X_lf=float('nan'),
- Y_lf=float('nan'),
- enforce_hom=False,
+ X_hf, # noqa: N803
+ Y_hf, # noqa: N803
+ X_lf=float('nan'), # noqa: N803
+ Y_lf=float('nan'), # noqa: N803
+ enforce_hom=False, # noqa: FBT002
):
#
# check if X dimension has changed...
@@ -662,12 +662,12 @@ def set_XY(
x_current_dim = self.x_dim
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- exec('x_current_dim = len(m_tmp.' + parname + ')')
+ exec('x_current_dim = len(m_tmp.' + parname + ')') # noqa: S102
if x_current_dim != X_hf.shape[1]:
kr = self.create_kernel(X_hf.shape[1])
- X_dummy = np.zeros((1, X_hf.shape[1]))
- Y_dummy = np.zeros((1, 1))
+ X_dummy = np.zeros((1, X_hf.shape[1])) # noqa: N806
+ Y_dummy = np.zeros((1, 1)) # noqa: N806
m_new = self.create_gpy_model(X_dummy, Y_dummy, kr)
m_tmp = m_new.copy()
# m_tmp.optimize()
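
The exec call above (S102) only needs the length of a lengthscale parameter. A sketch that walks the dotted parameter name instead, under the assumption that GPy exposes each parameter as a chain of attributes matching parameter_names():

from functools import reduce

def lengthscale_dim(model):
    for parname in model.parameter_names():
        if parname.endswith('lengthscale'):
            param = reduce(getattr, parname.split('.'), model)
            return len(param)
    return None
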
@@ -677,18 +677,18 @@ def set_XY(
msg = 'Error running SimCenterUQ - Response contains negative values. Please uncheck the log-transform option in the UQ tab'
self.exit(msg)
- Y_hfs = np.log(Y_hf)
+ Y_hfs = np.log(Y_hf) # noqa: N806
else:
- Y_hfs = Y_hf
+ Y_hfs = Y_hf # noqa: N806
if self.do_logtransform and self.do_mf:
if np.min(Y_lf) < 0:
msg = 'Error running SimCenterUQ - Response contains negative values. Please uncheck the log-transform option in the UQ tab'
self.exit(msg)
- Y_lfs = np.log(Y_lf)
+ Y_lfs = np.log(Y_lf) # noqa: N806
else:
- Y_lfs = Y_lf
+ Y_lfs = Y_lf # noqa: N806
# # below is dummy
# if np.all(np.isnan(X_lf)) and np.all(np.isnan(Y_lf)):
@@ -700,11 +700,11 @@ def set_XY(
# m_tmp = GPy.models.GPHeteroscedasticRegression(
# X_hf, Y_hfs, kernel=self.kg.copy()
# )
- # # TODO: temporary... need to find a way to not calibrate but update the variance
+ # # TODO: temporary... need to find a way to not calibrate but update the variance # noqa: TD002
# m_tmp.optimize()
# self.var_str[ny] = np.ones((m_tmp.Y.shape[0], 1))
- X_new, X_idx, indices, counts = np.unique(
+ X_new, X_idx, indices, counts = np.unique( # noqa: N806
X_hf,
axis=0,
return_index=True,
@@ -726,13 +726,13 @@ def set_XY(
elif n_unique == X_hf.shape[0]: # no repl
# Y_mean=Y_hfs[X_idx]
# Y_mean1, nugget_mean1 = self.predictStoMeans(X_new, Y_mean)
- Y_mean1, nugget_mean1 = self.predictStoMeans(X_hf, Y_hfs)
+ Y_mean1, nugget_mean1 = self.predictStoMeans(X_hf, Y_hfs) # noqa: N806
- if np.max(nugget_mean1) < 1.0e-10:
+ if np.max(nugget_mean1) < 1.0e-10: # noqa: PLR2004
self.set_XY(m_tmp, ny, X_hf, Y_hfs, enforce_hom=True)
return None
- else:
- Y_metadata, m_var, norm_var_str = self.predictStoVars(
+ else: # noqa: RET505
+ Y_metadata, m_var, norm_var_str = self.predictStoVars( # noqa: N806
X_hf, (Y_hfs - Y_mean1) ** 2, X_hf, Y_hfs, counts
)
m_tmp.set_XY2(X_hf, Y_hfs, Y_metadata=Y_metadata)
@@ -744,20 +744,20 @@ def set_XY(
self.Y_mean[ny] = Y_hfs
else:
# nonunique set - check if nugget is zero
- Y_mean, Y_var = np.zeros((n_unique, 1)), np.zeros((n_unique, 1))
+ Y_mean, Y_var = np.zeros((n_unique, 1)), np.zeros((n_unique, 1)) # noqa: N806
for idx in range(n_unique):
- Y_subset = Y_hfs[[i for i in np.where(indices == idx)[0]], :]
+ Y_subset = Y_hfs[[i for i in np.where(indices == idx)[0]], :] # noqa: C416, N806
Y_mean[idx, :] = np.mean(Y_subset, axis=0)
Y_var[idx, :] = np.var(Y_subset, axis=0)
- idx_repl = [i for i in np.where(counts > 1)[0]]
+ idx_repl = [i for i in np.where(counts > 1)[0]] # noqa: C416
- if np.max(Y_var) / np.var(Y_mean) < 1.0e-10:
+ if np.max(Y_var) / np.var(Y_mean) < 1.0e-10: # noqa: PLR2004
# NUGGET IS ZERO - no need for stochastic kriging
if self.do_logtransform:
- Y_mean = np.exp(Y_mean)
+ Y_mean = np.exp(Y_mean) # noqa: N806
m_tmp = self.set_XY(
m_tmp, ny, X_new, Y_mean, X_lf, Y_lf
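
The replicate handling in this hunk hinges on np.unique with axis=0: the inverse indices group repeated X rows so a per-location response mean and variance can be computed. A small worked example:

import numpy as np

X = np.array([[0.0], [0.0], [1.0], [1.0], [1.0]])
Y = np.array([1.0, 3.0, 2.0, 4.0, 6.0])

X_new, inverse, counts = np.unique(X, axis=0, return_inverse=True,
                                   return_counts=True)
inverse = inverse.ravel()  # some NumPy versions keep extra dims here
Y_mean = np.array([Y[inverse == k].mean() for k in range(len(X_new))])  # [2., 4.]
Y_var = np.array([Y[inverse == k].var() for k in range(len(X_new))])    # [1., 2.67]
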
@@ -766,13 +766,13 @@ def set_XY(
self.indices_unique = indices
return m_tmp
- elif self.nugget_opt == 'Heteroscedastic':
+ elif self.nugget_opt == 'Heteroscedastic': # noqa: RET505
#
# Constructing secondary GP model - can we make use of the "variance of sample variance"
#
- # TODO: log-variance
+ # TODO: log-variance # noqa: TD002
- Y_metadata, m_var, norm_var_str = self.predictStoVars(
+ Y_metadata, m_var, norm_var_str = self.predictStoVars( # noqa: N806
X_new[idx_repl, :],
Y_var[idx_repl],
X_new,
@@ -820,9 +820,9 @@ def set_XY(
else:
(
- X_list_tmp,
- Y_list_tmp,
- ) = emf.convert_lists_to_array.convert_xy_lists_to_arrays(
+ X_list_tmp, # noqa: N806
+ Y_list_tmp, # noqa: N806
+ ) = emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
[X_hf, X_lf], [Y_hfs, Y_lfs]
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
@@ -838,7 +838,7 @@ def set_XY(
return m_tmp
- def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts):
+ def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts): # noqa: N802, N803, D102
my_x_dim = X_repl.shape[1]
kernel_var = GPy.kern.Matern52(
input_dim=my_x_dim, ARD=True
@@ -859,13 +859,13 @@ def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts):
myrange[nx] * 100,
warning=False,
)
- # TODO change the kernel
+ # TODO change the kernel # noqa: TD002, TD004
m_var.optimize(max_f_eval=1000)
m_var.optimize_restarts(
self.nopt, parallel=True, num_processes=self.n_processor, verbose=False
)
- print(m_var)
+ print(m_var) # noqa: T201
log_var_pred, dum = m_var.predict(X_new)
var_pred = np.exp(log_var_pred)
@@ -879,11 +879,11 @@ def predictStoVars(self, X_repl, Y_var_repl, X_new, Y_mean, counts):
norm_var_str = var_pred.T[0] # if normalization was used..
# norm_var_str = (X_new+2)**2/max((X_new+2)**2)
- Y_metadata = {'variance_structure': norm_var_str / counts}
+ Y_metadata = {'variance_structure': norm_var_str / counts} # noqa: N806
return Y_metadata, m_var, norm_var_str
- def predictStoMeans(self, X, Y):
+ def predictStoMeans(self, X, Y): # noqa: N802, N803, D102
# under homoscedasticity
my_x_dim = X.shape[1]
kernel_mean = GPy.kern.Matern52(input_dim=my_x_dim, ARD=True)
@@ -951,8 +951,8 @@ def predictStoMeans(self, X, Y):
"""
return mean_pred, mean_var
- def calibrate(self):
- print('Calibrating in parallel', flush=True)
+ def calibrate(self): # noqa: C901, D102
+ print('Calibrating in parallel', flush=True) # noqa: T201
warnings.filterwarnings('ignore')
t_opt = time.time()
nugget_opt_tmp = self.nugget_opt
@@ -982,12 +982,12 @@ def calibrate(self):
if msg != '':
self.exit(msg)
- # TODO: terminate it gracefully....
+ # TODO: terminate it gracefully.... # noqa: TD002
# see https://stackoverflow.com/questions/21104997/keyboard-interrupt-with-pythons-multiprocessing
else:
for ny in range(self.y_dim):
- self.m_list[ny], msg, ny = calibrating(
+ self.m_list[ny], msg, ny = calibrating( # noqa: PLW2901
copy.deepcopy(self.m_list[ny]),
nugget_opt_tmp,
self.nuggetVal,
@@ -1013,26 +1013,26 @@ def calibrate(self):
for ny in range(self.y_dim):
for parname in self.m_list[ny].parameter_names():
if parname.endswith('variance') and ('Gauss' not in parname):
- exec(
+ exec( # noqa: S102
'my_new_var = max(self.m_list[ny].'
+ variance_keyword
+ ', 10*self.m_list[ny].'
+ parname
+ ')'
)
- exec('self.m_list[ny].' + variance_keyword + '= my_new_var')
+ exec('self.m_list[ny].' + variance_keyword + '= my_new_var') # noqa: S102
self.m_list[ny].optimize()
self.calib_time = time.time() - t_opt
- print(f' Calibration time: {self.calib_time:.2f} s', flush=True)
- Y_preds, Y_pred_vars, Y_pred_vars_w_measures, e2 = (
+ print(f' Calibration time: {self.calib_time:.2f} s', flush=True) # noqa: T201
+ Y_preds, Y_pred_vars, Y_pred_vars_w_measures, e2 = ( # noqa: N806
self.get_cross_validation_err()
)
return Y_preds, Y_pred_vars, Y_pred_vars_w_measures, e2
- def train_surrogate(self, t_init):
+ def train_surrogate(self, t_init): # noqa: C901, D102, PLR0915
self.nc1 = min(200 * self.x_dim, 2000) # candidate points
self.nq = min(200 * self.x_dim, 2000) # integration points
# FEM index
@@ -1044,7 +1044,7 @@ def train_surrogate(self, t_init):
self.time_lf_avg = float('Inf')
self.time_ratio = 1
- x_dim = self.x_dim
+ x_dim = self.x_dim # noqa: F841
y_dim = self.y_dim
#
@@ -1058,9 +1058,9 @@ def train_surrogate(self, t_init):
self.rv_name, self.do_parallel, self.y_dim, t_init, model_hf.thr_t
)
- def FEM_batch_hf(X, id_sim):
+ def FEM_batch_hf(X, id_sim): # noqa: N802, N803
# DiscStr: Xstr will be replaced with the string
- Xstr = X.astype(str)
+ Xstr = X.astype(str) # noqa: N806
for nx in self.rvDiscIdx:
for ns in range(X.shape[0]):
@@ -1080,9 +1080,9 @@ def FEM_batch_hf(X, id_sim):
self.time_ratio = self.time_hf_avg / self.time_lf_avg
return res
- def FEM_batch_lf(X, id_sim):
+ def FEM_batch_lf(X, id_sim): # noqa: N802, N803
# DiscStr: Xstr will be replaced with the string
- Xstr = X.astype(str)
+ Xstr = X.astype(str) # noqa: N806
for nx in self.rvDiscIdx:
for ns in range(X.shape[0]):
@@ -1105,21 +1105,21 @@ def FEM_batch_lf(X, id_sim):
self.time_ratio = self.time_lf_avg / self.time_lf_avg
return res
- tmp = time.time()
+ tmp = time.time() # noqa: F841
#
# get initial samples for high fidelity modeling
#
- X_hf_tmp = model_hf.sampling(max([model_hf.n_init - model_hf.n_existing, 0]))
+ X_hf_tmp = model_hf.sampling(max([model_hf.n_init - model_hf.n_existing, 0])) # noqa: N806
#
# if X is from a data file & Y is from simulation
#
if model_hf.model_without_sampling:
- X_hf_tmp, model_hf.X_existing = model_hf.X_existing, X_hf_tmp
- X_hf_tmp, Y_hf_tmp, self.id_sim_hf = FEM_batch_hf(X_hf_tmp, self.id_sim_hf)
+ X_hf_tmp, model_hf.X_existing = model_hf.X_existing, X_hf_tmp # noqa: N806
+ X_hf_tmp, Y_hf_tmp, self.id_sim_hf = FEM_batch_hf(X_hf_tmp, self.id_sim_hf) # noqa: N806
if model_hf.X_existing.shape[0] == 0:
self.X_hf, self.Y_hf = X_hf_tmp, Y_hf_tmp
@@ -1133,19 +1133,19 @@ def FEM_batch_lf(X, id_sim):
np.vstack([model_hf.Y_existing, Y_hf_tmp]),
)
- X_lf_tmp = model_lf.sampling(max([model_lf.n_init - model_lf.n_existing, 0]))
+ X_lf_tmp = model_lf.sampling(max([model_lf.n_init - model_lf.n_existing, 0])) # noqa: N806
# Design of experiments - Nearest neighbor sampling
# Giselle Fernández-Godino, M., Park, C., Kim, N. H., & Haftka, R. T. (2019). Issues in deciding whether to use multifidelity surrogates. AIAA Journal, 57(5), 2039-2054.
self.n_LFHFoverlap = 0
new_x_lf_tmp = np.zeros((0, self.x_dim))
- X_tmp = X_lf_tmp
+ X_tmp = X_lf_tmp # noqa: N806
for x_hf in self.X_hf:
if X_tmp.shape[0] > 0:
- id = closest_node(x_hf, X_tmp, self.ll)
+ id = closest_node(x_hf, X_tmp, self.ll) # noqa: A001
new_x_lf_tmp = np.vstack([new_x_lf_tmp, x_hf])
- X_tmp = np.delete(X_tmp, id, axis=0)
+ X_tmp = np.delete(X_tmp, id, axis=0) # noqa: N806
self.n_LFHFoverlap += 1
new_x_lf_tmp = np.vstack([new_x_lf_tmp, X_tmp])
@@ -1184,7 +1184,7 @@ def FEM_batch_lf(X, id_sim):
self.NRMSE_hist = np.zeros((1, y_dim), float)
self.NRMSE_idx = np.zeros((1, 1), int)
- print('======== RUNNING GP DoE ===========', flush=True)
+ print('======== RUNNING GP DoE ===========', flush=True) # noqa: T201
#
# Run Design of experiments
@@ -1194,7 +1194,7 @@ def FEM_batch_lf(X, id_sim):
nc1 = self.nc1
nq = self.nq
n_new = 0
- while exit_flag == False:
+ while exit_flag == False: # noqa: E712
# Initial calibration
# Calibrate self.m_list
@@ -1203,7 +1203,7 @@ def FEM_batch_lf(X, id_sim):
)
if self.do_logtransform:
# self.Y_cv = np.exp(2*self.Y_cvs+self.Y_cv_vars)*(np.exp(self.Y_cv_vars)-1) # in linear space
- # TODO: Let us use median instead of mean?
+ # TODO: Let us use median instead of mean? # noqa: TD002
self.Y_cv = np.exp(self.Y_cvs)
self.Y_cv_var = np.exp(2 * self.Y_cvs + self.Y_cv_vars) * (
np.exp(self.Y_cv_vars) - 1
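
The back-transform above uses the lognormal moment identities: if log(Y) ~ N(mu, s2), the median of Y is exp(mu) (what Y_cv reports, per the TODO about median vs mean) and Var(Y) = exp(2*mu + s2) * (exp(s2) - 1). A quick numeric check:

import numpy as np

mu, s2 = 0.1, 0.04
median_y = np.exp(mu)
var_y = np.exp(2 * mu + s2) * (np.exp(s2) - 1)
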
@@ -1219,9 +1219,9 @@ def FEM_batch_lf(X, id_sim):
if self.n_unique_hf < model_hf.thr_count:
if self.doeIdx == 'HF':
- tmp_doeIdx = self.doeIdx # single fideility
+                    tmp_doeIdx = self.doeIdx  # single fidelity # noqa: N806
else:
- tmp_doeIdx = 'HFHF' # HF in multifideility
+                    tmp_doeIdx = 'HFHF'  # HF in multifidelity # noqa: N806
[x_new_hf, y_idx_hf, score_hf] = self.run_design_of_experiments(
nc1, nq, e2, tmp_doeIdx
@@ -1237,22 +1237,22 @@ def FEM_batch_lf(X, id_sim):
score_lf = 0 # score : reduced amount of variance
if self.doeIdx == 'HFLF':
- fideilityIdx = np.argmax(
+ fideilityIdx = np.argmax( # noqa: N806
[score_hf / self.time_hf_avg, score_lf / self.time_lf_avg]
)
if fideilityIdx == 0:
- tmp_doeIdx = 'HF'
+ tmp_doeIdx = 'HF' # noqa: N806
else:
- tmp_doeIdx = 'LF'
+ tmp_doeIdx = 'LF' # noqa: N806
else:
- tmp_doeIdx = self.doeIdx
+ tmp_doeIdx = self.doeIdx # noqa: N806
if self.do_logtransform:
- Y_hfs = np.log(self.Y_hf)
+ Y_hfs = np.log(self.Y_hf) # noqa: N806
else:
- Y_hfs = self.Y_hf
+ Y_hfs = self.Y_hf # noqa: N806
- NRMSE_val = self.normalized_mean_sq_error(self.Y_cvs, Y_hfs)
+ NRMSE_val = self.normalized_mean_sq_error(self.Y_cvs, Y_hfs) # noqa: N806
self.NRMSE_hist = np.vstack((self.NRMSE_hist, np.array(NRMSE_val)))
self.NRMSE_idx = np.vstack((self.NRMSE_idx, i))
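
NRMSE_val drives the convergence history stacked above. The exact definition lives in normalized_mean_sq_error; a common form, normalizing RMSE by the response range, is sketched here as an assumption rather than a quote of that method:

import numpy as np

def nrmse(y_pred, y_true):
    rmse = np.sqrt(np.mean((y_pred - y_true) ** 2, axis=0))
    return rmse / (y_true.max(axis=0) - y_true.min(axis=0))
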
@@ -1285,9 +1285,9 @@ def FEM_batch_lf(X, id_sim):
break
if time.time() - t_init > model_hf.thr_t - self.calib_time:
- n_iter = i
+ n_iter = i # noqa: F841
self.exit_code = 'time'
- doe_off = True
+ doe_off = True # noqa: F841
break
if tmp_doeIdx.startswith('HF'):
@@ -1313,7 +1313,7 @@ def FEM_batch_lf(X, id_sim):
self.X_lf = np.vstack([self.X_lf, x_lf_new])
self.Y_lf = np.vstack([self.Y_lf, y_lf_new])
i = self.id_sim_lf + n_new
- # TODO
+ # TODO # noqa: TD002, TD004
# print(">> {:.2f} s".format(time.time() - t_init))
@@ -1332,11 +1332,11 @@ def FEM_batch_lf(X, id_sim):
self.verify()
self.verify_nugget()
- print(f'my exit code = {self.exit_code}', flush=True)
- print(f'1. count = {self.id_sim_hf}', flush=True)
- print(f'1. count_unique = {self.n_unique_hf}', flush=True)
- print(f'2. max(NRMSE) = {np.max(self.NRMSE_val)}', flush=True)
- print(f'3. time = {self.sim_time:.2f} s', flush=True)
+ print(f'my exit code = {self.exit_code}', flush=True) # noqa: T201
+ print(f'1. count = {self.id_sim_hf}', flush=True) # noqa: T201
+ print(f'1. count_unique = {self.n_unique_hf}', flush=True) # noqa: T201
+ print(f'2. max(NRMSE) = {np.max(self.NRMSE_val)}', flush=True) # noqa: T201
+ print(f'3. time = {self.sim_time:.2f} s', flush=True) # noqa: T201
r"""
@@ -1515,7 +1515,7 @@ def FEM_batch_lf(X, id_sim):
plt.xlabel("CV")
plt.ylabel("Exact")
plt.show()
- """
+ """ # noqa: W291, W293
# plt.show()
# plt.plot(self.Y_cv[:, 1],Y_exact[:,1],'x')
# plt.plot(Y_exact[:, 1],Y_exact[:, 1],'x')
@@ -1530,14 +1530,14 @@ def FEM_batch_lf(X, id_sim):
#
# self.m_list[i].predict()
- def verify(self):
- Y_cv = self.Y_cv
- Y = self.Y_hf
+ def verify(self): # noqa: D102
+ Y_cv = self.Y_cv # noqa: N806
+ Y = self.Y_hf # noqa: N806
model_hf = self.modelInfoHF
if model_hf.is_model:
n_err = 1000
- Xerr = model_hf.resampling(self.m_list[0].X, n_err)
+ Xerr = model_hf.resampling(self.m_list[0].X, n_err) # noqa: N806
y_pred_var = np.zeros((n_err, self.y_dim))
y_data_var = np.zeros((n_err, self.y_dim))
@@ -1573,8 +1573,8 @@ def verify(self):
# else:
# y_pred_var[ns, ny] = y_pred_vars
- error_ratio2_Pr = y_pred_var / y_data_var
- print(np.max(error_ratio2_Pr, axis=0), flush=True)
+ error_ratio2_Pr = y_pred_var / y_data_var # noqa: N806
+ print(np.max(error_ratio2_Pr, axis=0), flush=True) # noqa: T201
perc_thr_tmp = np.hstack(
[np.array([1]), np.arange(10, 1000, 50), np.array([999])]
@@ -1591,7 +1591,7 @@ def verify(self):
self.perc_val = 0
corr_val = np.zeros((self.y_dim,))
- R2_val = np.zeros((self.y_dim,))
+ R2_val = np.zeros((self.y_dim,)) # noqa: N806
for ny in range(self.y_dim):
corr_val[ny] = np.corrcoef(Y[:, ny], Y_cv[:, ny])[0, 1]
R2_val[ny] = 1 - np.sum(pow(Y_cv[:, ny] - Y[:, ny], 2)) / np.sum(
@@ -1604,11 +1604,11 @@ def verify(self):
self.corr_val = corr_val
self.R2_val = R2_val
- def verify_nugget(self):
- Y_cv = self.Y_cv
- Y_cv_var_w_measure = self.Y_cv_var_w_measure
- Y = self.Y_hf
- model_hf = self.modelInfoHF
+ def verify_nugget(self): # noqa: D102
+ Y_cv = self.Y_cv # noqa: N806
+ Y_cv_var_w_measure = self.Y_cv_var_w_measure # noqa: N806
+ Y = self.Y_hf # noqa: N806
+ model_hf = self.modelInfoHF # noqa: F841
self.inbound50 = np.zeros((self.y_dim,))
self.Gausspvalue = np.zeros((self.y_dim,))
@@ -1616,12 +1616,12 @@ def verify_nugget(self):
if not self.do_mf:
for ny in range(self.y_dim):
if not self.do_logtransform:
- PI_lb = norm.ppf(
+ PI_lb = norm.ppf( # noqa: N806
0.25,
loc=Y_cv[:, ny],
scale=np.sqrt(Y_cv_var_w_measure[:, ny]),
)
- PI_ub = norm.ppf(
+ PI_ub = norm.ppf( # noqa: N806
0.75,
loc=Y_cv[:, ny],
scale=np.sqrt(Y_cv_var_w_measure[:, ny]),
@@ -1638,15 +1638,15 @@ def verify_nugget(self):
# sigm = np.sqrt(
# np.log(Y_cv_var_w_measure[:, ny] / pow(Y_cv[:, ny], 2) + 1)
# )
- log_Y_cv = self.Y_cvs[:, ny]
- log_Y_cv_var_w_measure = self.Y_cv_var_w_measures[:, ny]
+ log_Y_cv = self.Y_cvs[:, ny] # noqa: N806
+ log_Y_cv_var_w_measure = self.Y_cv_var_w_measures[:, ny] # noqa: N806
# PI_lb = lognorm.ppf(0.25, s=sigm, scale=np.exp(mu)).tolist()
# PI_ub = lognorm.ppf(0.75, s=sigm, scale=np.exp(mu)).tolist()
- PI_lb = norm.ppf(
+ PI_lb = norm.ppf( # noqa: N806
0.25, loc=log_Y_cv, scale=np.sqrt(log_Y_cv_var_w_measure)
).tolist()
- PI_ub = norm.ppf(
+ PI_ub = norm.ppf( # noqa: N806
0.75, loc=log_Y_cv, scale=np.sqrt(log_Y_cv_var_w_measure)
).tolist()
num_in_bound = np.sum(
@@ -1664,7 +1664,7 @@ def verify_nugget(self):
else:
pass
- def save_model(self, filename):
+ def save_model(self, filename): # noqa: C901, D102, PLR0915
if self.isEEUQ:
self.rv_name_new = []
for nx in range(self.x_dim):
@@ -1678,7 +1678,7 @@ def save_model(self, filename):
self.x_dim = len(self.rv_name_new)
if self.do_mf:
- with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file:
+ with open(self.work_dir + '/' + filename + '.pkl', 'wb') as file: # noqa: PTH123
pickle.dump(self.m_list, file)
header_string_x = ' ' + ' '.join([str(elem) for elem in self.rv_name]) + ' '
@@ -1801,7 +1801,7 @@ def save_model(self, filename):
results = {}
- hfJson = {}
+ hfJson = {} # noqa: N806
hfJson['doSampling'] = self.modelInfoHF.is_model
hfJson['doSimulation'] = self.modelInfoHF.is_model
hfJson['DoEmethod'] = self.modelInfoHF.doe_method
@@ -1810,12 +1810,12 @@ def save_model(self, filename):
hfJson['valSampUnique'] = self.n_unique_hf
hfJson['valSim'] = self.id_sim_hf
- constIdx = []
- constVal = []
+ constIdx = [] # noqa: N806
+ constVal = [] # noqa: N806
for ny in range(self.y_dim):
if np.var(self.Y_hf[:, ny]) == 0:
- constIdx += [ny]
- constVal += [np.mean(self.Y_hf[:, ny])]
+ constIdx += [ny] # noqa: N806
+ constVal += [np.mean(self.Y_hf[:, ny])] # noqa: N806
hfJson['constIdx'] = constIdx
hfJson['constVal'] = constVal
@@ -1829,7 +1829,7 @@ def save_model(self, filename):
if self.isEEUQ:
if len(self.IM_names) > 0:
- IM_sub_Json = {}
+ IM_sub_Json = {} # noqa: N806
IM_sub_Json['IntensityMeasure'] = self.intensityMeasure
IM_sub_Json['GeneralInformation'] = {'units': self.unitInfo}
IM_sub_Json['Events'] = {}
@@ -1838,7 +1838,7 @@ def save_model(self, filename):
results['highFidelityInfo'] = hfJson
- lfJson = {}
+ lfJson = {} # noqa: N806
if self.do_mf:
lfJson['doSampling'] = self.modelInfoLF.is_data
lfJson['doSimulation'] = self.modelInfoLF.is_model
@@ -1956,7 +1956,7 @@ def save_model(self, filename):
rvs['name'] = self.rvName[nx]
rvs['distribution'] = self.rvDist[nx]
rvs['value'] = self.rvVal[nx]
- rv_list = rv_list + [rvs]
+ rv_list = rv_list + [rvs] # noqa: RUF005
results['randomVariables'] = rv_list
# Used for surrogate
@@ -1967,7 +1967,7 @@ def save_model(self, filename):
results['modelInfo'][self.g_name[ny] + '_Var'] = {}
for parname in self.m_var_list[ny].parameter_names():
results['modelInfo'][self.g_name[ny] + '_Var'][parname] = list(
- eval('self.m_var_list[ny].' + parname)
+ eval('self.m_var_list[ny].' + parname) # noqa: S307
)
results['modelInfo'][self.g_name[ny] + '_Var'][
'TrainingSamplesY'
@@ -1980,29 +1980,29 @@ def save_model(self, filename):
results['modelInfo'][self.g_name[ny]] = {}
for parname in self.m_list[ny].parameter_names():
results['modelInfo'][self.g_name[ny]][parname] = list(
- eval('self.m_list[ny].' + parname)
+ eval('self.m_list[ny].' + parname) # noqa: S307
)
if self.isEEUQ:
# read SAM.json
- SAMpath = self.work_dir + '/templatedir/SAM.json'
+ SAMpath = self.work_dir + '/templatedir/SAM.json' # noqa: N806
try:
- with open(SAMpath, encoding='utf-8') as f:
- SAMjson = json.load(f)
- except Exception:
- with open(SAMpath + '.sc', encoding='utf-8') as f:
- SAMjson = json.load(f)
-
- EDPpath = self.work_dir + '/templatedir/EDP.json'
- with open(EDPpath, encoding='utf-8') as f:
- EDPjson = json.load(f)
+ with open(SAMpath, encoding='utf-8') as f: # noqa: PTH123
+ SAMjson = json.load(f) # noqa: N806
+ except Exception: # noqa: BLE001
+ with open(SAMpath + '.sc', encoding='utf-8') as f: # noqa: PTH123
+ SAMjson = json.load(f) # noqa: N806
+
+ EDPpath = self.work_dir + '/templatedir/EDP.json' # noqa: N806
+ with open(EDPpath, encoding='utf-8') as f: # noqa: PTH123
+ EDPjson = json.load(f) # noqa: N806
results['SAM'] = SAMjson
results['EDP'] = EDPjson
- with open(self.work_dir + '/dakota.out', 'w', encoding='utf-8') as fp:
+ with open(self.work_dir + '/dakota.out', 'w', encoding='utf-8') as fp: # noqa: PTH123
json.dump(results, fp, indent=1)
- with open(self.work_dir + '/GPresults.out', 'w') as file:
+ with open(self.work_dir + '/GPresults.out', 'w') as file: # noqa: PTH123
file.write('* Problem setting\n')
file.write(f' - dimension of x : {self.x_dim}\n')
file.write(f' - dimension of y : {self.y_dim}\n')
@@ -2076,7 +2076,7 @@ def save_model(self, filename):
m_tmp = self.m_list[ny]
for parname in m_tmp.parameter_names():
file.write(f' - {parname} ')
- parvals = eval('m_tmp.' + parname)
+ parvals = eval('m_tmp.' + parname) # noqa: S307
if len(parvals) == self.x_dim:
file.write('\n')
for nx in range(self.x_dim):
@@ -2087,25 +2087,25 @@ def save_model(self, filename):
file.write(f' : {parvals[0]:.2e}\n')
file.write('\n'.format())
- print('Results Saved', flush=True)
+ print('Results Saved', flush=True) # noqa: T201
return 0
- def run_design_of_experiments(self, nc1, nq, e2, doeIdx='HF'):
+ def run_design_of_experiments(self, nc1, nq, e2, doeIdx='HF'): # noqa: C901, N803, D102, PLR0912, PLR0915
if doeIdx == 'LF':
- lfset = set([tuple(x) for x in self.X_lf.tolist()])
- hfset = set([tuple(x) for x in self.X_hf.tolist()])
+ lfset = set([tuple(x) for x in self.X_lf.tolist()]) # noqa: C403
+ hfset = set([tuple(x) for x in self.X_hf.tolist()]) # noqa: C403
hfsamples = hfset - lfset
if len(hfsamples) == 0:
lf_additional_candi = np.zeros((0, self.x_dim))
else:
lf_additional_candi = np.array([np.array(x) for x in hfsamples])
- def sampling(N):
+ def sampling(N): # noqa: N803
return model_lf.sampling(N)
else:
- def sampling(N):
+ def sampling(N): # noqa: N803
return model_hf.sampling(N)
# doeIdx = 0
@@ -2116,11 +2116,11 @@ def sampling(N):
model_hf = self.modelInfoHF
model_lf = self.modelInfoLF
- X_hf = self.X_hf
- Y_hf = self.Y_hf
- X_lf = self.X_lf
- Y_lf = self.Y_lf
- ll = self.ll # TODO which ll?
+ X_hf = self.X_hf # noqa: N806
+ Y_hf = self.Y_hf # noqa: N806
+ X_lf = self.X_lf # noqa: N806
+ Y_lf = self.Y_lf # noqa: N806
+ ll = self.ll # TODO which ll? # noqa: TD002, TD004
y_var = np.var(Y_hf, axis=0) # normalization
y_idx = np.argmax(np.sum(e2 / y_var, axis=0))
@@ -2163,7 +2163,7 @@ def sampling(N):
# cri2[i] = sum(e2[:, y_idx] / Y_pred_var[:, y_idx] * wei.T)
cri2[i] = sum(e2[:, y_idx] * wei.T)
- VOI = np.zeros(yc1_pred.shape)
+ VOI = np.zeros(yc1_pred.shape) # noqa: N806
for i in range(nc1):
pdfvals = (
m_stack.kern.K(np.array([xq[i]]), xq) ** 2
@@ -2180,9 +2180,9 @@ def sampling(N):
logcrimi2 = np.log(cri2[:, 0])
rankid = np.zeros(nc1)
- varRank = np.zeros(nc1)
- biasRank = np.zeros(nc1)
- for id in range(nc1):
+ varRank = np.zeros(nc1) # noqa: N806
+ biasRank = np.zeros(nc1) # noqa: N806
+ for id in range(nc1): # noqa: A001
idx_tmp = np.argwhere(
(logcrimi1 >= logcrimi1[id]) * (logcrimi2 >= logcrimi2[id])
)
@@ -2194,11 +2194,11 @@ def sampling(N):
idx_1rank = list((np.argwhere(rankid == 1)).flatten())
if doeIdx.startswith('HF'):
- X_stack = X_hf
- Y_stack = Y_hf[:, y_idx][np.newaxis].T
+ X_stack = X_hf # noqa: N806
+ Y_stack = Y_hf[:, y_idx][np.newaxis].T # noqa: N806
elif doeIdx.startswith('LF'):
- X_stack = X_lf
- Y_stack = Y_lf[:, y_idx][np.newaxis].T
+ X_stack = X_lf # noqa: N806
+ Y_stack = Y_lf[:, y_idx][np.newaxis].T # noqa: N806
if num_1rank < self.cal_interval:
# When number of pareto is smaller than cal_interval
@@ -2221,10 +2221,10 @@ def sampling(N):
idx_pareto_new = [best_global]
del idx_pareto_candi[best_local]
- for i in range(self.cal_interval - 1):
- X_stack = np.vstack([X_stack, xc1[best_global, :][np.newaxis]])
+ for i in range(self.cal_interval - 1): # noqa: B007
+ X_stack = np.vstack([X_stack, xc1[best_global, :][np.newaxis]]) # noqa: N806
# any variables
- Y_stack = np.vstack([Y_stack, np.zeros((1, 1))])
+ Y_stack = np.vstack([Y_stack, np.zeros((1, 1))]) # noqa: N806
if doeIdx.startswith('HF'):
m_stack = self.set_XY(m_stack, y_idx, X_stack, Y_stack)
@@ -2233,7 +2233,7 @@ def sampling(N):
m_tmp, y_idx, self.X_hf, self.Y_hf, X_stack, Y_stack
)
- dummy, Yq_var = self.predict(m_stack, xc1[idx_pareto_candi, :])
+ dummy, Yq_var = self.predict(m_stack, xc1[idx_pareto_candi, :]) # noqa: N806
cri1 = Yq_var * VOI[idx_pareto_candi]
cri1 = (cri1 - np.min(cri1)) / (np.max(cri1) - np.min(cri1))
score_tmp = (
@@ -2242,7 +2242,7 @@ def sampling(N):
best_local = np.argsort(-np.squeeze(score_tmp))[0]
best_global = idx_pareto_candi[best_local]
- idx_pareto_new = idx_pareto_new + [best_global]
+ idx_pareto_new = idx_pareto_new + [best_global] # noqa: RUF005
del idx_pareto_candi[best_local]
idx_pareto = idx_pareto_new
@@ -2254,11 +2254,11 @@ def sampling(N):
update_score = np.zeros((self.cal_interval, 1))
if doeIdx.startswith('HF'):
- X_stack = X_hf
- Y_stack = Y_hf[:, y_idx][np.newaxis].T
+ X_stack = X_hf # noqa: N806
+ Y_stack = Y_hf[:, y_idx][np.newaxis].T # noqa: N806
elif doeIdx.startswith('LF'):
- X_stack = X_lf
- Y_stack = Y_lf[:, y_idx][np.newaxis].T
+ X_stack = X_lf # noqa: N806
+ Y_stack = Y_lf[:, y_idx][np.newaxis].T # noqa: N806
for ni in range(self.cal_interval):
#
@@ -2271,9 +2271,9 @@ def sampling(N):
xq = sampling(nq) # same for hf/lf
- dummy, Yq_var = self.predict(m_stack, xq)
+ dummy, Yq_var = self.predict(m_stack, xq) # noqa: N806
if ni == 0:
- IMSEbase = 1 / xq.shape[0] * sum(Yq_var.flatten())
+ IMSEbase = 1 / xq.shape[0] * sum(Yq_var.flatten()) # noqa: N806
tmp = time.time()
if self.do_parallel:
@@ -2290,25 +2290,25 @@ def sampling(N):
for i in range(nc1)
)
result_objs = list(self.pool.starmap(imse, iterables))
- IMSEc1 = np.zeros(nc1)
- for IMSE_val, idx in result_objs:
+ IMSEc1 = np.zeros(nc1) # noqa: N806
+ for IMSE_val, idx in result_objs: # noqa: N806
IMSEc1[idx] = IMSE_val
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} - parallel .. time = {time.time() - tmp:.2f}'
) # 7s # 3-4s
- # TODO: terminate it gracefully....
+ # TODO: terminate it gracefully.... # noqa: TD002
# see https://stackoverflow.com/questions/21104997/keyboard-interrupt-with-pythons-multiprocessing
try:
while True:
time.sleep(0.5)
- if all([r.ready() for r in result]):
+ if all([r.ready() for r in result]): # noqa: C419, F821
break
except KeyboardInterrupt:
- pool.terminate()
- pool.join()
+ pool.terminate() # noqa: F821
+ pool.join() # noqa: F821
else:
- IMSEc1 = np.zeros(nc1)
+ IMSEc1 = np.zeros(nc1) # noqa: N806
for i in range(nc1):
IMSEc1[i], dummy = imse(
copy.deepcopy(m_stack),
@@ -2319,16 +2319,16 @@ def sampling(N):
y_idx,
doeIdx,
)
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} - serial .. time = {time.time() - tmp}'
) # 4s
new_idx = np.argmin(IMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
# any variables
- Y_stack = np.vstack([Y_stack, np.zeros((1, 1))])
+ Y_stack = np.vstack([Y_stack, np.zeros((1, 1))]) # noqa: N806
update_point[ni, :] = x_point
if doeIdx == 'HFHF':
@@ -2356,14 +2356,14 @@ def sampling(N):
elif self.doe_method == 'imsew':
update_point = np.zeros((self.cal_interval, self.x_dim))
- update_score = np.zeros((self.cal_interval, 1))
+ update_score = np.zeros((self.cal_interval, 1)) # noqa: F841
if doeIdx.startswith('HF'):
- X_stack = X_hf
- Y_stack = Y_hf[:, y_idx][np.newaxis].T
+ X_stack = X_hf # noqa: N806
+ Y_stack = Y_hf[:, y_idx][np.newaxis].T # noqa: N806
elif doeIdx.startswith('LF'):
- X_stack = X_lf
- Y_stack = Y_lf[:, y_idx][np.newaxis].T
+ X_stack = X_lf # noqa: N806
+ Y_stack = Y_lf[:, y_idx][np.newaxis].T # noqa: N806
for ni in range(self.cal_interval):
#
@@ -2381,9 +2381,9 @@ def sampling(N):
phiq[i, :] = e2[closest_node(xq[i, :], X_hf, ll)]
phiqr = pow(phiq[:, y_idx], r)
- dummy, Yq_var = self.predict(m_stack, xq)
+ dummy, Yq_var = self.predict(m_stack, xq) # noqa: N806
if ni == 0:
- IMSEbase = (
+ IMSEbase = ( # noqa: N806
1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten())
)
@@ -2402,14 +2402,14 @@ def sampling(N):
for i in range(nc1)
)
result_objs = list(self.pool.starmap(imse, iterables))
- IMSEc1 = np.zeros(nc1)
- for IMSE_val, idx in result_objs:
+ IMSEc1 = np.zeros(nc1) # noqa: N806
+ for IMSE_val, idx in result_objs: # noqa: N806
IMSEc1[idx] = IMSE_val
- print(
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} - parallel .. time = {time.time() - tmp:.2f}'
) # 7s # 3-4s
else:
- IMSEc1 = np.zeros(nc1)
+ IMSEc1 = np.zeros(nc1) # noqa: N806
for i in range(nc1):
IMSEc1[i], dummy = imse(
copy.deepcopy(m_stack),
@@ -2422,17 +2422,17 @@ def sampling(N):
)
if np.mod(i, 200) == 0:
# 4s
- print(f'IMSE iter {ni}, candi {i}/{nc1}')
- print(
+ print(f'IMSE iter {ni}, candi {i}/{nc1}') # noqa: T201
+ print( # noqa: T201
f'IMSE: finding the next DOE {ni} - serial .. time = {time.time() - tmp}'
) # 4s
new_idx = np.argmin(IMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
# any variables
- Y_stack = np.vstack([Y_stack, np.zeros((1, 1))])
+ Y_stack = np.vstack([Y_stack, np.zeros((1, 1))]) # noqa: N806
update_point[ni, :] = x_point
if doeIdx == 'HFHF':
@@ -2460,11 +2460,11 @@ def sampling(N):
elif self.doe_method == 'mmsew':
if doeIdx.startswith('HF'):
- X_stack = X_hf
- Y_stack = Y_hf[:, y_idx][np.newaxis].T
+ X_stack = X_hf # noqa: N806
+ Y_stack = Y_hf[:, y_idx][np.newaxis].T # noqa: N806
elif doeIdx.startswith('LF'):
- X_stack = X_lf
- Y_stack = Y_lf[:, y_idx][np.newaxis].T
+ X_stack = X_lf # noqa: N806
+ Y_stack = Y_lf[:, y_idx][np.newaxis].T # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
@@ -2480,13 +2480,13 @@ def sampling(N):
phicr = pow(phic[:, y_idx], r)
yc1_pred, yc1_var = self.predict(m_stack, xc1) # use only variance
- MMSEc1 = yc1_var.flatten() * phicr.flatten()
+ MMSEc1 = yc1_var.flatten() * phicr.flatten() # noqa: N806
new_idx = np.argmax(MMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
# any variables
- Y_stack = np.vstack([Y_stack, np.zeros((1, 1))])
+ Y_stack = np.vstack([Y_stack, np.zeros((1, 1))]) # noqa: N806
# m_stack.set_XY(X=X_stack, Y=Y_stack)
if doeIdx.startswith('HF'):
m_stack = self.set_XY(m_stack, y_idx, X_stack, Y_stack)
@@ -2500,11 +2500,11 @@ def sampling(N):
elif self.doe_method == 'mmse':
if doeIdx.startswith('HF'):
- X_stack = X_hf
- Y_stack = Y_hf[:, y_idx][np.newaxis].T
+ X_stack = X_hf # noqa: N806
+ Y_stack = Y_hf[:, y_idx][np.newaxis].T # noqa: N806
elif doeIdx.startswith('LF'):
- X_stack = X_lf
- Y_stack = Y_lf[:, y_idx][np.newaxis].T
+ X_stack = X_lf # noqa: N806
+ Y_stack = Y_lf[:, y_idx][np.newaxis].T # noqa: N806
update_point = np.zeros((self.cal_interval, self.x_dim))
@@ -2515,13 +2515,13 @@ def sampling(N):
nc1 = xc1.shape[0]
yc1_pred, yc1_var = self.predict(m_stack, xc1) # use only variance
- MMSEc1 = yc1_var.flatten()
+ MMSEc1 = yc1_var.flatten() # noqa: N806
new_idx = np.argmax(MMSEc1, axis=0)
x_point = xc1[new_idx, :][np.newaxis]
- X_stack = np.vstack([X_stack, x_point])
+ X_stack = np.vstack([X_stack, x_point]) # noqa: N806
# any variables
- Y_stack = np.vstack([Y_stack, np.zeros((1, 1))])
+ Y_stack = np.vstack([Y_stack, np.zeros((1, 1))]) # noqa: N806
# m_stack.set_XY(X=X_stack, Y=Y_stack)
# if doeIdx.startswith("HF"):
@@ -2563,24 +2563,24 @@ def sampling(N):
return update_point, y_idx, score
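
Stripped of its bookkeeping, the 'mmse' branch above is a greedy loop: pick the candidate with the largest predictive variance, append it to the design with a placeholder output, and refit. A minimal sketch assuming a GPy-style model exposing set_XY/predict; the gp object and shapes are illustrative:

import numpy as np

def greedy_mmse(gp, X, Y, candidates, n_new):
    # The GP predictive variance depends only on the inputs, which is
    # why a zero placeholder output suffices when updating the design.
    picks = np.zeros((n_new, X.shape[1]))
    for ni in range(n_new):
        _, var = gp.predict(candidates)
        k = int(np.argmax(var.flatten()))
        picks[ni, :] = candidates[k]
        X = np.vstack([X, candidates[k][np.newaxis]])
        Y = np.vstack([Y, np.zeros((1, Y.shape[1]))])  # dummy output
        gp.set_XY(X, Y)
    return picks
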
- def normalized_mean_sq_error(self, yp, ye):
+ def normalized_mean_sq_error(self, yp, ye): # noqa: D102
n = yp.shape[0]
data_bound = np.max(ye, axis=0) - np.min(ye, axis=0)
- RMSE = np.sqrt(1 / n * np.sum(pow(yp - ye, 2), axis=0))
- NRMSE = RMSE / data_bound
+ RMSE = np.sqrt(1 / n * np.sum(pow(yp - ye, 2), axis=0)) # noqa: N806
+ NRMSE = RMSE / data_bound # noqa: N806
NRMSE[np.argwhere(data_bound == 0)] = 0
return NRMSE
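
For reference, a toy run of the range-normalized RMSE defined above; the arrays are made up, not taken from the workflow:

import numpy as np

yp = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])  # predictions
ye = np.array([[1.1, 2.0], [1.9, 4.2], [3.2, 5.8]])  # exact values

rmse = np.sqrt(np.mean((yp - ye) ** 2, axis=0))
data_bound = np.max(ye, axis=0) - np.min(ye, axis=0)
nrmse = rmse / data_bound
nrmse[data_bound == 0] = 0  # constant outputs contribute no error
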
- def get_cross_validation_err(self):
- print('Calculating cross validation errors', flush=True)
+ def get_cross_validation_err(self): # noqa: D102
+ print('Calculating cross validation errors', flush=True) # noqa: T201
time_tmp = time.time()
- X_hf = self.X_hf # contains separate samples
- Y_hf = self.Y_hf
+ X_hf = self.X_hf # contains separate samples # noqa: N806
+ Y_hf = self.Y_hf # noqa: N806
e2 = np.zeros(Y_hf.shape) # only for unique...
- Y_pred = np.zeros(Y_hf.shape)
- Y_pred_var = np.zeros(Y_hf.shape)
- Y_pred_var_w_measure = np.zeros(Y_hf.shape)
+ Y_pred = np.zeros(Y_hf.shape) # noqa: N806
+ Y_pred_var = np.zeros(Y_hf.shape) # noqa: N806
+ Y_pred_var_w_measure = np.zeros(Y_hf.shape) # noqa: N806
#
# Efficient cross validation TODO: check if it works for heteroscedastic noise
#
@@ -2595,8 +2595,8 @@ def get_cross_validation_err(self):
indices = self.indices_unique
for ny in range(Y_hf.shape[1]):
- Xm = self.m_list[ny].X # contains unique samples
- Ym = self.m_list[ny].Y
+ Xm = self.m_list[ny].X # contains unique samples # noqa: N806
+ Ym = self.m_list[ny].Y # noqa: N806
# works both for stochastic/stochastic
nugget_mat = (
@@ -2604,8 +2604,8 @@ def get_cross_validation_err(self):
* self.m_list[ny].Gaussian_noise.parameters
)
- Rmat = self.m_list[ny].kern.K(Xm)
- Rinv = np.linalg.inv(Rmat + nugget_mat)
+ Rmat = self.m_list[ny].kern.K(Xm) # noqa: N806
+ Rinv = np.linalg.inv(Rmat + nugget_mat) # noqa: N806
e = np.squeeze(
np.matmul(Rinv, (Ym - self.normMeans[ny]))
) / np.squeeze(np.diag(Rinv))
@@ -2629,18 +2629,18 @@ def get_cross_validation_err(self):
)
else:
- Y_pred2 = np.zeros(Y_hf.shape)
- Y_pred_var2 = np.zeros(Y_hf.shape)
+ Y_pred2 = np.zeros(Y_hf.shape) # noqa: N806
+ Y_pred_var2 = np.zeros(Y_hf.shape) # noqa: N806
e22 = np.zeros(Y_hf.shape)
for ny in range(Y_hf.shape[1]):
m_tmp = copy.deepcopy(self.m_list[ny])
for ns in range(X_hf.shape[0]):
- X_tmp = np.delete(X_hf, ns, axis=0)
- Y_tmp = np.delete(Y_hf, ns, axis=0)
+ X_tmp = np.delete(X_hf, ns, axis=0) # noqa: N806
+ Y_tmp = np.delete(Y_hf, ns, axis=0) # noqa: N806
if self.stochastic:
- Y_meta_tmp = m_tmp.Y_metadata
+ Y_meta_tmp = m_tmp.Y_metadata # noqa: N806
Y_meta_tmp['variance_structure'] = np.delete(
m_tmp.Y_metadata['variance_structure'], ns, axis=0
)
@@ -2652,7 +2652,7 @@ def get_cross_validation_err(self):
else:
m_tmp.set_XY(X_tmp, Y_tmp[:, ny][np.newaxis].transpose())
- print(ns)
+ print(ns) # noqa: T201
# m_tmp = self.set_XY(
# m_tmp,
# ny,
@@ -2663,20 +2663,20 @@ def get_cross_validation_err(self):
# )
x_loo = X_hf[ns, :][np.newaxis]
- Y_pred_tmp, Y_err_tmp = self.predict(m_tmp, x_loo)
+ Y_pred_tmp, Y_err_tmp = self.predict(m_tmp, x_loo) # noqa: N806
Y_pred2[ns, ny] = Y_pred_tmp
Y_pred_var2[ns, ny] = Y_err_tmp
if self.do_logtransform:
- Y_exact = np.log(Y_hf[ns, ny])
+ Y_exact = np.log(Y_hf[ns, ny]) # noqa: N806
else:
- Y_exact = Y_hf[ns, ny]
+ Y_exact = Y_hf[ns, ny] # noqa: N806
e22[ns, ny] = pow((Y_pred_tmp - Y_exact), 2) # for nD outputs
- Y_pred = Y_pred2
- Y_pred_var = Y_pred_var2
+ Y_pred = Y_pred2 # noqa: N806
+ Y_pred_var = Y_pred_var2 # noqa: N806
if not self.do_mf:
Y_pred_var_w_measure[:, ny] = (
Y_pred_var2[:, ny]
@@ -2684,7 +2684,7 @@ def get_cross_validation_err(self):
* self.normVars[ny]
)
else:
- # TODO account for Gaussian_noise.parameters as well
+ # TODO account for Gaussian_noise.parameters as well # noqa: TD002, TD004
Y_pred_var_w_measure[:, ny] = (
Y_pred_var2[:, ny]
+ self.m_list[
@@ -2702,78 +2702,78 @@ def get_cross_validation_err(self):
plt.title("With nugget (Linear)"); plt.xlabel("Training sample id"); plt.ylabel("LOOCV variance (before multiplying $\sigma_z^2$)"); plt.legend(["Closedform","iteration"]);
plt.show();
- """
- print(
+ """ # noqa: W291, W293
+ print( # noqa: T201
f' Cross validation calculation time: {time.time() - time_tmp:.2f} s',
flush=True,
)
return Y_pred, Y_pred_var, Y_pred_var_w_measure, e2
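
The Rinv-based shortcut above is the standard closed-form leave-one-out identity for GP regression (see e.g. Rasmussen & Williams, Sec. 5.4.2): both LOO residuals and variances follow from the inverse covariance alone, so no per-sample refits are needed. A self-contained sketch with a stand-in RBF kernel (the workflow uses the fitted GPy kernel instead):

import numpy as np

def loo_closed_form(X, y, lengthscale=1.0, nugget=1e-6):
    # Stand-in RBF kernel plus nugget; illustrative hyperparameters.
    d2 = np.sum((X[:, None, :] - X[None, :, :]) ** 2, axis=-1)
    K = np.exp(-0.5 * d2 / lengthscale**2) + nugget * np.eye(len(y))
    Kinv = np.linalg.inv(K)
    diag = np.diag(Kinv)
    residuals = (Kinv @ y) / diag  # y_i - mu_{-i}
    variances = 1.0 / diag         # sigma^2_{-i}
    return residuals, variances
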
-def imse(m_tmp, xcandi, xq, phiqr, i, y_idx, doeIdx='HF'):
+def imse(m_tmp, xcandi, xq, phiqr, i, y_idx, doeIdx='HF'): # noqa: ARG001, N803, D103
if doeIdx == 'HF':
- X = m_tmp.X
- Y = m_tmp.Y
- X_tmp = np.vstack([X, xcandi])
+ X = m_tmp.X # noqa: N806
+ Y = m_tmp.Y # noqa: N806
+ X_tmp = np.vstack([X, xcandi]) # noqa: N806
# any variables
- Y_tmp = np.vstack([Y, np.zeros((1, Y.shape[1]))])
+ Y_tmp = np.vstack([Y, np.zeros((1, Y.shape[1]))]) # noqa: N806
# self.set_XY(m_tmp, X_tmp, Y_tmp)
m_tmp.set_XY(X_tmp, Y_tmp)
- dummy, Yq_var = m_tmp.predict(xq)
+ dummy, Yq_var = m_tmp.predict(xq) # noqa: N806
elif doeIdx == 'HFHF':
- idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0).T[0]
- idxLF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 1).T[0]
- X_hf = m_tmp.gpy_model.X[idxHF, :-1]
- Y_hf = m_tmp.gpy_model.Y[idxHF, :]
- X_lf = m_tmp.gpy_model.X[idxLF, :-1]
- Y_lf = m_tmp.gpy_model.Y[idxLF, :]
- X_tmp = np.vstack([X_hf, xcandi])
+ idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0).T[0] # noqa: N806
+ idxLF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 1).T[0] # noqa: N806
+ X_hf = m_tmp.gpy_model.X[idxHF, :-1] # noqa: N806
+ Y_hf = m_tmp.gpy_model.Y[idxHF, :] # noqa: N806
+ X_lf = m_tmp.gpy_model.X[idxLF, :-1] # noqa: N806
+ Y_lf = m_tmp.gpy_model.Y[idxLF, :] # noqa: N806
+ X_tmp = np.vstack([X_hf, xcandi]) # noqa: N806
# any variables
- Y_tmp = np.vstack([Y_hf, np.zeros((1, Y_hf.shape[1]))])
+ Y_tmp = np.vstack([Y_hf, np.zeros((1, Y_hf.shape[1]))]) # noqa: N806
# self.set_XY(m_tmp, X_tmp, Y_tmp, X_lf, Y_lf)
- X_list_tmp, Y_list_tmp = (
- emf.convert_lists_to_array.convert_xy_lists_to_arrays(
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
+ emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
[X_tmp, X_lf], [Y_tmp, Y_lf]
)
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
- xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))])
- dummy, Yq_var = m_tmp.predict(xq_list)
+ xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # noqa: F821
+ dummy, Yq_var = m_tmp.predict(xq_list) # noqa: N806
elif doeIdx.startswith('LF'):
- idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0).T[0]
- idxLF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 1).T[0]
- X_hf = m_tmp.gpy_model.X[idxHF, :-1]
- Y_hf = m_tmp.gpy_model.Y[idxHF, :]
- X_lf = m_tmp.gpy_model.X[idxLF, :-1]
- Y_lf = m_tmp.gpy_model.Y[idxLF, :]
- X_tmp = np.vstack([X_lf, xcandi])
+ idxHF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 0).T[0] # noqa: N806
+ idxLF = np.argwhere(m_tmp.gpy_model.X[:, -1] == 1).T[0] # noqa: N806
+ X_hf = m_tmp.gpy_model.X[idxHF, :-1] # noqa: N806
+ Y_hf = m_tmp.gpy_model.Y[idxHF, :] # noqa: N806
+ X_lf = m_tmp.gpy_model.X[idxLF, :-1] # noqa: N806
+ Y_lf = m_tmp.gpy_model.Y[idxLF, :] # noqa: N806
+ X_tmp = np.vstack([X_lf, xcandi]) # noqa: N806
# any variables
- Y_tmp = np.vstack([Y_lf, np.zeros((1, Y_lf.shape[1]))])
+ Y_tmp = np.vstack([Y_lf, np.zeros((1, Y_lf.shape[1]))]) # noqa: N806
# self.set_XY(m_tmp, X_hf, Y_hf, X_tmp, Y_tmp)
- X_list_tmp, Y_list_tmp = (
- emf.convert_lists_to_array.convert_xy_lists_to_arrays(
+ X_list_tmp, Y_list_tmp = ( # noqa: N806
+ emf.convert_lists_to_array.convert_xy_lists_to_arrays( # noqa: F821
[X_hf, X_tmp], [Y_hf, Y_tmp]
)
)
m_tmp.set_data(X=X_list_tmp, Y=Y_list_tmp)
- xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))])
- dummy, Yq_var = m_tmp.predict(xq_list)
+ xq_list = convert_x_list_to_array([xq, np.zeros((0, xq.shape[1]))]) # noqa: F821
+ dummy, Yq_var = m_tmp.predict(xq_list) # noqa: N806
else:
- print(f'doe method <{doeIdx}> is not supported', flush=True)
+ print(f'doe method <{doeIdx}> is not supported', flush=True) # noqa: T201
# dummy, Yq_var = self.predict(m_tmp,xq)
- IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten())
+ IMSEc1 = 1 / xq.shape[0] * sum(phiqr.flatten() * Yq_var.flatten()) # noqa: N806
return IMSEc1, i
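
Stripped of the multi-fidelity branches, the criterion above is: tentatively add the candidate to the design, refit, and average the weighted predictive variance over the quadrature points. A sketch assuming a GPy-style single-fidelity model; since the GP variance ignores the outputs, a zero placeholder Y suffices:

import numpy as np

def imse_sketch(m_tmp, xcandi, xq, phiqr):
    X_tmp = np.vstack([m_tmp.X, xcandi])
    Y_tmp = np.vstack([m_tmp.Y, np.zeros((1, m_tmp.Y.shape[1]))])  # dummy output
    m_tmp.set_XY(X_tmp, Y_tmp)
    _, Yq_var = m_tmp.predict(xq)  # predictive variance at quadrature points
    return np.mean(phiqr.flatten() * Yq_var.flatten())
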
-class model_info:
- def __init__(
+class model_info: # noqa: D101
+ def __init__( # noqa: C901
self,
- surrogateJson,
- rvJson,
+ surrogateJson, # noqa: N803
+ rvJson, # noqa: N803
work_dir,
x_dim,
y_dim,
@@ -2781,9 +2781,9 @@ def __init__(
idx=0,
):
def exit_tmp(msg):
- print(msg)
- print(msg, file=sys.stderr)
- exit(-1)
+ print(msg) # noqa: T201
+ print(msg, file=sys.stderr) # noqa: T201
+ exit(-1) # noqa: PLR1722
# idx = -1 : no info (dummy) paired with 0
# idx = 0 : single fidelity
@@ -2811,7 +2811,7 @@ def exit_tmp(msg):
msg = 'Error reading json: either select "Import Data File" or "Sampling and Simulation"'
exit_tmp(msg)
- elif idx == 1 or idx == 2:
+ elif idx == 1 or idx == 2: # noqa: PLR1714, PLR2004
# MF
self.is_data = True # default
self.is_model = surrogateJson['fromModel']
@@ -2829,14 +2829,14 @@ def exit_tmp(msg):
# high-fidelity
input_file = 'templatedir/inpFile_HF.in'
output_file = 'templatedir/outFile_HF.in'
- elif idx == 2:
+ elif idx == 2: # noqa: PLR2004
# low-fidelity
input_file = 'templatedir/inpFile_LF.in'
output_file = 'templatedir/outFile_LF.in'
if self.is_data:
- self.inpData = os.path.join(work_dir, input_file)
- self.outData = os.path.join(work_dir, output_file)
+ self.inpData = os.path.join(work_dir, input_file) # noqa: PTH118
+ self.outData = os.path.join(work_dir, output_file) # noqa: PTH118
self.X_existing = read_txt(self.inpData, exit_tmp)
self.n_existing = self.X_existing.shape[0]
@@ -2879,7 +2879,7 @@ def exit_tmp(msg):
else:
try:
self.user_init = surrogateJson['initialDoE']
- except:
+ except: # noqa: E722
self.user_init = -1 # automate
self.nugget_opt = surrogateJson['nuggetOpt']
@@ -2939,7 +2939,7 @@ def exit_tmp(msg):
).T
else:
self.xrange = np.zeros((self.x_dim, 2))
- # TODO should I use "effective" number of dims?
+ # TODO should I use "effective" number of dims? # noqa: TD002, TD004
self.ll = self.xrange[:, 1] - self.xrange[:, 0]
if self.user_init <= 0: # automated choice 8*D
n_init_tmp = int(np.ceil(8 * self.x_dim / n_processor) * n_processor)
@@ -2951,14 +2951,14 @@ def exit_tmp(msg):
# self.n_init = 4
self.doe_method = self.doe_method.lower()
- def sampling(self, n):
+ def sampling(self, n): # noqa: D102
# n is "total" samples
if n > 0:
- X_samples = np.zeros((n, self.x_dim))
+ X_samples = np.zeros((n, self.x_dim)) # noqa: N806
# LHS
sampler = qmc.LatinHypercube(d=self.x_dim)
- U = sampler.random(n=n)
+ U = sampler.random(n=n) # noqa: N806
for nx in range(self.x_dim):
if self.xDistTypeArr[nx] == 'U':
X_samples[:, nx] = (
@@ -2971,7 +2971,7 @@ def sampling(self, n):
if (
self.numRepl
) * self.numSampToBeRepl > 0 and not self.numSampRepldone:
- X_samples = np.vstack(
+ X_samples = np.vstack( # noqa: N806
[
X_samples,
np.tile(
@@ -2982,21 +2982,21 @@ def sampling(self, n):
)
self.numSampRepldone = True
else:
- X_samples = np.zeros((0, self.x_dim))
+ X_samples = np.zeros((0, self.x_dim)) # noqa: N806
return X_samples
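
A minimal sketch of the Latin hypercube draw used above, mapping unit-cube samples onto per-dimension bounds; the bounds here are illustrative:

import numpy as np
from scipy.stats import qmc

xrange = np.array([[0.0, 1.0], [10.0, 20.0]])  # (x_dim, 2) lower/upper bounds
sampler = qmc.LatinHypercube(d=xrange.shape[0])
U = sampler.random(n=8)  # stratified samples in [0, 1)^d
X_samples = xrange[:, 0] + U * (xrange[:, 1] - xrange[:, 0])
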
- def resampling(self, X, n):
+ def resampling(self, X, n): # noqa: N803, D102
# n is "total" samples
# cube bounds obtained from data
dim = X.shape[1]
minvals = np.min(X, axis=0)
maxvals = np.max(X, axis=0)
- print(dim)
- X_samples = np.zeros((n, dim))
+ print(dim) # noqa: T201
+ X_samples = np.zeros((n, dim)) # noqa: N806
sampler = qmc.LatinHypercube(d=dim)
- U = sampler.random(n=n)
+ U = sampler.random(n=n) # noqa: N806
for nx in range(dim):
X_samples[:, nx] = U[:, nx] * (maxvals[nx] - minvals[nx]) + minvals[nx]
@@ -3027,7 +3027,7 @@ def resampling(self, X, n):
# Additional functions
-def weights_node2(node, nodes, ls):
+def weights_node2(node, nodes, ls): # noqa: D103
nodes = np.asarray(nodes)
deltas = nodes - node
deltas_norm = np.zeros(deltas.shape)
@@ -3042,11 +3042,11 @@ def weights_node2(node, nodes, ls):
return weig / sum(weig)
-def calibrating(
+def calibrating( # noqa: C901, D103
m_tmp,
nugget_opt_tmp,
- nuggetVal,
- normVar,
+ nuggetVal, # noqa: N803
+ normVar, # noqa: N803
do_mf,
do_heteroscedastic,
nopt,
@@ -3063,12 +3063,12 @@ def calibrating(
if not do_mf:
if nugget_opt_tmp == 'Optimize':
# m_tmp[variance_keyword].unfix()
- X = m_tmp.X
+ X = m_tmp.X # noqa: N806
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- for nx in range(X.shape[1]):
+ for nx in range(X.shape[1]): # noqa: B007
myrange = np.max(X, axis=0) - np.min(X, axis=0)
- exec('m_tmp.' + parname + '[[nx]] = myrange[nx]')
+ exec('m_tmp.' + parname + '[[nx]] = myrange[nx]') # noqa: S102
elif nugget_opt_tmp == 'Fixed Values':
m_tmp[variance_keyword].constrain_fixed(
@@ -3080,32 +3080,32 @@ def calibrating(
)
elif nugget_opt_tmp == 'Zero':
m_tmp[variance_keyword].constrain_fixed(0, warning=False)
- X = m_tmp.X
+ X = m_tmp.X # noqa: N806
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- for nx in range(X.shape[1]):
+ for nx in range(X.shape[1]): # noqa: B007
myrange = np.max(X, axis=0) - np.min(X, axis=0)
- exec('m_tmp.' + parname + '[[nx]] = myrange[nx]')
+ exec('m_tmp.' + parname + '[[nx]] = myrange[nx]') # noqa: S102
elif nugget_opt_tmp == 'Heteroscedastic':
- X = m_tmp.X
+ X = m_tmp.X # noqa: N806
for parname in m_tmp.parameter_names():
if parname.endswith('lengthscale'):
- for nx in range(X.shape[1]):
- myrange = np.max(X, axis=0) - np.min(X, axis=0)
- exec('m_tmp.' + parname + '[[nx]] = myrange[nx]*100')
- exec(
+ for nx in range(X.shape[1]): # noqa: B007
+ myrange = np.max(X, axis=0) - np.min(X, axis=0) # noqa: F841
+ exec('m_tmp.' + parname + '[[nx]] = myrange[nx]*100') # noqa: S102
+ exec( # noqa: S102
'm_tmp.'
+ parname
+ '[[nx]].constrain_bounded(myrange[nx] / X.shape[0], myrange[nx]*100,warning=False)'
)
# m_tmp[parname][nx] = myrange[nx]*100
# m_tmp[parname][nx].constrain_bounded(myrange[nx] / X.shape[0], myrange[nx]*100)
- # TODO change the kernel
+ # TODO change the kernel # noqa: TD002, TD004
else:
msg = 'Nugget keyword not identified: ' + nugget_opt_tmp
if do_mf:
- # TODO: is this right?
+ # TODO: is this right? # noqa: TD002
if nugget_opt_tmp == 'Optimize':
m_tmp.gpy_model.mixed_noise.Gaussian_noise.unfix()
m_tmp.gpy_model.mixed_noise.Gaussian_noise_1.unfix()
@@ -3149,20 +3149,20 @@ def calibrating(
num_processes=n_processor,
verbose=False,
)
- print(m_tmp)
+ print(m_tmp) # noqa: T201
# while n+20 <= nopt:
# m_tmp.optimize_restarts(num_restarts=20)
# n = n+20
# if not nopt==n:
# m_tmp.optimize_restarts(num_restarts=nopt-n)
- print(flush=True)
+ print(flush=True) # noqa: T201
return m_tmp, msg, ny
-def closest_node(x, X, ll):
- X = np.asarray(X)
+def closest_node(x, X, ll): # noqa: N803, D103
+ X = np.asarray(X) # noqa: N806
deltas = X - x
deltas_norm = np.zeros(deltas.shape)
for nx in range(X.shape[1]):
@@ -3172,11 +3172,11 @@ def closest_node(x, X, ll):
return np.argmin(dist_2)
-def read_txt(text_dir, exit_fun):
- if not os.path.exists(text_dir):
+def read_txt(text_dir, exit_fun): # noqa: D103
+ if not os.path.exists(text_dir): # noqa: PTH110
msg = 'Error: file does not exist: ' + text_dir
exit_fun(msg)
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123
# Iterate through the file until the table starts
header_count = 0
for line in f:
@@ -3186,16 +3186,16 @@ def read_txt(text_dir, exit_fun):
break
# print(line)
try:
- with open(text_dir) as f:
- X = np.loadtxt(f, skiprows=header_count)
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
+ X = np.loadtxt(f, skiprows=header_count) # noqa: N806
except ValueError:
- with open(text_dir) as f:
+ with open(text_dir) as f: # noqa: PTH123, PLW2901
try:
- X = np.genfromtxt(f, skip_header=header_count, delimiter=',')
- X = np.atleast_2d(X)
+ X = np.genfromtxt(f, skip_header=header_count, delimiter=',') # noqa: N806
+ X = np.atleast_2d(X) # noqa: N806
# if there is an extra trailing delimiter, remove the resulting NaN column
if np.isnan(X[-1, -1]):
- X = np.delete(X, -1, 1)
+ X = np.delete(X, -1, 1) # noqa: N806
# X = np.loadtxt(f, skiprows=header_count, delimiter=',')
except ValueError:
msg = 'Error: unsupported file format ' + text_dir
@@ -3209,7 +3209,7 @@ def read_txt(text_dir, exit_fun):
exit_fun(msg)
if X.ndim == 1:
- X = np.array([X]).transpose()
+ X = np.array([X]).transpose() # noqa: N806
return X
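
Condensed, the loader above is a whitespace-first parse with a comma-delimited fallback and a guard against a trailing delimiter; a sketch with a hypothetical path argument:

import numpy as np

def load_table(path, header_count=0):
    try:
        X = np.loadtxt(path, skiprows=header_count)
    except ValueError:  # likely comma-delimited
        X = np.atleast_2d(np.genfromtxt(path, skip_header=header_count, delimiter=','))
        if np.isnan(X[-1, -1]):  # trailing delimiter yields a NaN column
            X = np.delete(X, -1, 1)
    if X.ndim == 1:
        X = X[:, np.newaxis]  # return a single column as (n, 1)
    return X
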
diff --git a/modules/performUQ/UCSD_UQ/UCSD_UQ.py b/modules/performUQ/UCSD_UQ/UCSD_UQ.py
index 97bbeed92..97c952c3d 100644
--- a/modules/performUQ/UCSD_UQ/UCSD_UQ.py
+++ b/modules/performUQ/UCSD_UQ/UCSD_UQ.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import os
import platform
import shlex
@@ -8,7 +8,7 @@
from pathlib import Path
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--workflowInput')
@@ -18,39 +18,39 @@ def main(args):
args, unknowns = parser.parse_known_args()
- workflowInput = args.workflowInput
- workflowOutput = args.workflowOutput
- driverFile = args.driverFile
- runType = args.runType
+ workflowInput = args.workflowInput # noqa: N806
+ workflowOutput = args.workflowOutput # noqa: N806, F841
+ driverFile = args.driverFile # noqa: N806
+ runType = args.runType # noqa: N806
if runType == 'runningLocal':
if platform.system() == 'Windows':
- pythonCommand = 'python'
- driverFile = driverFile + '.bat'
+ pythonCommand = 'python' # noqa: N806
+ driverFile = driverFile + '.bat' # noqa: N806
else:
- pythonCommand = 'python3'
+ pythonCommand = 'python3' # noqa: N806
- mainScriptDir = os.path.dirname(os.path.realpath(__file__))
- mainScript = os.path.join(mainScriptDir, 'mainscript.py')
- templateDir = os.getcwd()
- tmpSimCenterDir = str(Path(templateDir).parents[0])
+ mainScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
+ mainScript = os.path.join(mainScriptDir, 'mainscript.py') # noqa: PTH118, N806
+ templateDir = os.getcwd() # noqa: PTH109, N806
+ tmpSimCenterDir = str(Path(templateDir).parents[0]) # noqa: N806
# Change permission of driver file
- os.chmod(driverFile, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH)
- st = os.stat(driverFile)
- os.chmod(driverFile, st.st_mode | stat.S_IEXEC)
- driverFile = './' + driverFile
- print('WORKFLOW: ' + driverFile)
+ os.chmod(driverFile, stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH) # noqa: PTH101
+ st = os.stat(driverFile) # noqa: PTH116
+ os.chmod(driverFile, st.st_mode | stat.S_IEXEC) # noqa: PTH101
+ driverFile = './' + driverFile # noqa: N806
+ print('WORKFLOW: ' + driverFile) # noqa: T201
command = (
f'"{pythonCommand}" "{mainScript}" "{tmpSimCenterDir}"'
f' "{templateDir}" {runType} {driverFile} {workflowInput}'
)
- print(command)
+ print(command) # noqa: T201
command_list = shlex.split(command)
- result = subprocess.run(
+ result = subprocess.run( # noqa: S603, UP022
command_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@@ -64,7 +64,7 @@ def main(args):
try:
result.check_returncode()
except subprocess.CalledProcessError:
- with open(err_file, 'a') as f:
+ with open(err_file, 'a') as f: # noqa: PTH123
f.write(f'ERROR: {result.stderr}\n\n')
f.write(f'The command was: {result.args}\n\n')
f.write(f'The return code was: {result.returncode}\n\n')
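
The driver invocation above follows a common run-capture-report pattern; a minimal sketch with an illustrative command and error-file name:

import shlex
import subprocess

command = 'python3 mainscript.py workdir templatedir runningLocal driver input.json'
result = subprocess.run(shlex.split(command), capture_output=True, text=True)
try:
    result.check_returncode()
except subprocess.CalledProcessError:
    with open('UCSD_UQ.err', 'a') as f:
        f.write(f'ERROR: {result.stderr}\n\nThe return code was: {result.returncode}\n')
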
diff --git a/modules/performUQ/UCSD_UQ/calibration_utilities.py b/modules/performUQ/UCSD_UQ/calibration_utilities.py
index 267d29e0e..9b5b5c7e3 100644
--- a/modules/performUQ/UCSD_UQ/calibration_utilities.py
+++ b/modules/performUQ/UCSD_UQ/calibration_utilities.py
@@ -1,4 +1,4 @@
-import os
+import os # noqa: INP001, D100
import shutil
import sys
import time
@@ -24,16 +24,16 @@ def __init__(self, message):
self.message = message
-class CovarianceMatrixPreparer:
+class CovarianceMatrixPreparer: # noqa: D101
def __init__(
self,
- calibrationData: np.ndarray,
- edpLengthsList: list[int],
- edpNamesList: list[str],
- workdirMain: str,
- numExperiments: int,
- logFile: TextIO,
- runType: str,
+ calibrationData: np.ndarray, # noqa: N803
+ edpLengthsList: list[int], # noqa: FA102, N803
+ edpNamesList: list[str], # noqa: FA102, N803
+ workdirMain: str, # noqa: N803
+ numExperiments: int, # noqa: N803
+ logFile: TextIO, # noqa: N803
+ runType: str, # noqa: N803
) -> None:
self.calibrationData = calibrationData
self.edpLengthsList = edpLengthsList
@@ -58,112 +58,112 @@ def __init__(
'the corresponding transformed response data.'
)
- def getDefaultErrorVariances(self):
+ def getDefaultErrorVariances(self): # noqa: N802, D102
# For each response variable, compute the variance of the data. These will be the default error variance
# values used in the calibration process. Values of the multiplier on these default error variance values will be
# calibrated. There will be one such error variance value per response quantity. If there is only data from one
# experiment, then the default error std.dev. value is assumed to be 5% of the absolute maximum value of the data
# corresponding to that response quantity.
- defaultErrorVariances = 1e-12 * np.ones_like(
+ defaultErrorVariances = 1e-12 * np.ones_like( # noqa: N806
self.edpLengthsList, dtype=float
)
# defaultErrorVariances = np.zeros_like(self.edpLengthsList, dtype=float)
if (
np.shape(self.calibrationData)[0] > 1
): # if there is more than one row of data, i.e. data from multiple experiments
- currentIndex = 0
+ currentIndex = 0 # noqa: N806
for i in range(len(self.edpLengthsList)):
- dataSlice = self.calibrationData[
+ dataSlice = self.calibrationData[ # noqa: N806
:, currentIndex : currentIndex + self.edpLengthsList[i]
]
v = np.nanvar(dataSlice)
if v != 0:
defaultErrorVariances[i] = v
- currentIndex += self.edpLengthsList[i]
+ currentIndex += self.edpLengthsList[i] # noqa: N806
else:
- currentIndex = 0
+ currentIndex = 0 # noqa: N806
for i in range(len(self.edpLengthsList)):
- dataSlice = self.calibrationData[
+ dataSlice = self.calibrationData[ # noqa: N806
:, currentIndex : currentIndex + self.edpLengthsList[i]
]
v = np.max(np.absolute(dataSlice))
if v != 0:
defaultErrorVariances[i] = (0.05 * v) ** 2
- currentIndex += self.edpLengthsList[i]
+ currentIndex += self.edpLengthsList[i] # noqa: N806
self.defaultErrorVariances = defaultErrorVariances
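
A standalone restatement of the default rule described in the comments above: with multiple experiments, use the per-response data variance; with a single experiment, fall back to a (5% of max |data|)^2 heuristic, keeping the 1e-12 floor:

import numpy as np

def default_error_variances(data, edp_lengths):
    variances = 1e-12 * np.ones(len(edp_lengths))  # floor for degenerate data
    pos = 0
    for i, n in enumerate(edp_lengths):
        block = data[:, pos:pos + n]
        if data.shape[0] > 1:
            v = np.nanvar(block)
        else:
            v = (0.05 * np.max(np.abs(block))) ** 2
        if v != 0:
            variances[i] = v
        pos += n
    return variances
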
- def createCovarianceMatrix(self):
- covarianceMatrixList = []
- covarianceTypeList = []
+ def createCovarianceMatrix(self): # noqa: C901, N802, D102
+ covarianceMatrixList = [] # noqa: N806
+ covarianceTypeList = [] # noqa: N806
- logFile = self.logFile
- edpNamesList = self.edpNamesList
- workdirMain = self.workdirMain
- numExperiments = self.numExperiments
+ logFile = self.logFile # noqa: N806
+ edpNamesList = self.edpNamesList # noqa: N806
+ workdirMain = self.workdirMain # noqa: N806
+ numExperiments = self.numExperiments # noqa: N806
logFile.write('\n\nLooping over the experiments and EDPs')
# First, check if the user has passed in any covariance matrix data
- for expNum in range(1, numExperiments + 1):
+ for expNum in range(1, numExperiments + 1): # noqa: N806
logFile.write(f'\n\nExperiment number: {expNum}')
- for i, edpName in enumerate(edpNamesList):
+ for i, edpName in enumerate(edpNamesList): # noqa: N806
logFile.write(f'\n\tEDP: {edpName}')
- covarianceFileName = f'{edpName}.{expNum}.sigma'
- covarianceFile = os.path.join(workdirMain, covarianceFileName)
+ covarianceFileName = f'{edpName}.{expNum}.sigma' # noqa: N806
+ covarianceFile = os.path.join(workdirMain, covarianceFileName) # noqa: PTH118, N806
logFile.write(
f"\n\t\tChecking to see if user-supplied file '{covarianceFileName}' exists in '{workdirMain}'"
)
- if os.path.isfile(covarianceFile):
+ if os.path.isfile(covarianceFile): # noqa: PTH113
logFile.write('\n\t\tFound a user supplied file.')
if self.runType == 'runningLocal':
src = covarianceFile
- dst = os.path.join(workdirMain, covarianceFileName)
+ dst = os.path.join(workdirMain, covarianceFileName) # noqa: PTH118
logFile.write(
f'\n\t\tCopying user-supplied covariance file from {src} to {dst}'
)
shutil.copyfile(src, dst)
- covarianceFile = dst
+ covarianceFile = dst # noqa: N806
logFile.write(
f"\n\t\tReading in user supplied covariance matrix from file: '{covarianceFile}'"
)
# Check the data in the covariance matrix file
- tmpCovFile = os.path.join(
+ tmpCovFile = os.path.join( # noqa: PTH118, N806
workdirMain, 'quoFEMTempCovMatrixFile.sigma'
)
- numRows = 0
- numCols = 0
+ numRows = 0 # noqa: N806
+ numCols = 0 # noqa: N806
linenum = 0
- with open(tmpCovFile, 'w') as f1:
- with open(covarianceFile) as f:
+ with open(tmpCovFile, 'w') as f1: # noqa: SIM117, PTH123
+ with open(covarianceFile) as f: # noqa: PTH123
for line in f:
linenum += 1
if len(line.strip()) == 0:
continue
- else:
- line = line.replace(',', ' ')
+ else: # noqa: RET507
+ line = line.replace(',', ' ') # noqa: PLW2901
# Check the length of the line
words = line.split()
if numRows == 0:
- numCols = len(words)
+ numCols = len(words) # noqa: N806
elif numCols != len(words):
logFile.write(
f'\nERROR: The number of columns in line {numRows} does not match the '
f'number of columns in line {numRows - 1} of file {covarianceFile}.'
)
- raise DataProcessingError(
- f'ERROR: The number of columns in line {numRows} do not match the '
+ raise DataProcessingError( # noqa: TRY003
+ f'ERROR: The number of columns in line {numRows} does not match the ' # noqa: EM102
f'number of columns in line {numRows - 1} of file {covarianceFile}.'
)
- tempLine = ''
+ tempLine = '' # noqa: N806
for w in words:
- tempLine += f'{w} '
+ tempLine += f'{w} ' # noqa: N806
# logFile.write("\ncovMatrixLine {}: ".format(linenum), tempLine)
if numRows == 0:
f1.write(tempLine)
else:
f1.write('\n')
f1.write(tempLine)
- numRows += 1
- covMatrix = np.genfromtxt(tmpCovFile)
+ numRows += 1 # noqa: N806
+ covMatrix = np.genfromtxt(tmpCovFile) # noqa: N806
covarianceMatrixList.append(covMatrix)
# os.remove(tmpCovFile)
logFile.write(
@@ -187,8 +187,8 @@ def createCovarianceMatrix(self):
f'\nERROR: The number of columns of data in the covariance matrix file {covarianceFile}'
f' must be either 1 or {self.edpLengthsList[i]}. Found {numCols} columns'
)
- raise DataProcessingError(
- f'ERROR: The number of columns of data in the covariance matrix file {covarianceFile}'
+ raise DataProcessingError( # noqa: TRY003
+ f'ERROR: The number of columns of data in the covariance matrix file {covarianceFile}' # noqa: EM102
f' must be either 1 or {self.edpLengthsList[i]}. Found {numCols} columns'
)
elif numRows == self.edpLengthsList[i]:
@@ -206,8 +206,8 @@ def createCovarianceMatrix(self):
f'\nERROR: The number of columns of data in the covariance matrix file {covarianceFile}'
f' must be either 1 or {self.edpLengthsList[i]}. Found {numCols} columns'
)
- raise DataProcessingError(
- f'ERROR: The number of columns of data in the covariance matrix file {covarianceFile}'
+ raise DataProcessingError( # noqa: TRY003
+ f'ERROR: The number of columns of data in the covariance matrix file {covarianceFile}' # noqa: EM102
f' must be either 1 or {self.edpLengthsList[i]}. Found {numCols} columns'
)
else:
@@ -215,8 +215,8 @@ def createCovarianceMatrix(self):
f'\nERROR: The number of rows of data in the covariance matrix file {covarianceFile}'
f' must be either 1 or {self.edpLengthsList[i]}. Found {numRows} rows'
)
- raise DataProcessingError(
- f'ERROR: The number of rows of data in the covariance matrix file {covarianceFile}'
+ raise DataProcessingError( # noqa: TRY003
+ f'ERROR: The number of rows of data in the covariance matrix file {covarianceFile}' # noqa: EM102
f' must be either 1 or {self.edpLengthsList[i]}. Found {numRows} rows'
)
logFile.write(f'\n\t\tCovariance matrix: {covMatrix}')
@@ -227,7 +227,7 @@ def createCovarianceMatrix(self):
logFile.write(
'\n\t\tThe covariance matrix is an identity matrix multiplied by this value.'
)
- scalarVariance = np.array(self.defaultErrorVariances[i])
+ scalarVariance = np.array(self.defaultErrorVariances[i]) # noqa: N806
covarianceMatrixList.append(scalarVariance)
covarianceTypeList.append('scalar')
logFile.write(f'\n\t\tCovariance matrix: {scalarVariance}')
@@ -238,20 +238,20 @@ def createCovarianceMatrix(self):
)
tmp = block_diag(*covarianceMatrixList)
for row in tmp:
- rowString = ' '.join([f'{col:14.8g}' for col in row])
+ rowString = ' '.join([f'{col:14.8g}' for col in row]) # noqa: N806
logFile.write(f'\n\t{rowString}')
return self.covarianceMatrixList
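
The per-EDP blocks collected above combine with scipy's block_diag, the same call used in the logging step; a toy assembly (the block values are made up):

import numpy as np
from scipy.linalg import block_diag

blocks = [
    np.array(0.25),                      # 'scalar' type: a single variance
    np.diag([1.0, 2.0]),                 # 'diagonal' type
    np.array([[1.0, 0.1], [0.1, 1.0]]),  # 'matrix' type
]
full_cov = block_diag(*blocks)  # (5, 5) block-diagonal covariance
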
-class CalDataPreparer:
+class CalDataPreparer: # noqa: D101
def __init__(
self,
- workdirMain: str,
- workdirTemplate: str,
- calDataFileName: str,
- edpNamesList: list[str],
- edpLengthsList: list[int],
- logFile: TextIO,
+ workdirMain: str, # noqa: N803
+ workdirTemplate: str, # noqa: N803
+ calDataFileName: str, # noqa: N803
+ edpNamesList: list[str], # noqa: FA102, N803
+ edpLengthsList: list[int], # noqa: FA102, N803
+ logFile: TextIO, # noqa: N803
) -> None:
self.workdirMain = workdirMain
self.workdirTemplate = workdirTemplate
@@ -262,16 +262,16 @@ def __init__(
self.lineLength = sum(edpLengthsList)
self.moveCalDataFile(self.calDataFileName)
- def moveCalDataFile(self, calDataFileName):
- os.rename(
- os.path.join(self.workdirTemplate, calDataFileName),
- os.path.join(self.workdirMain, calDataFileName),
+ def moveCalDataFile(self, calDataFileName): # noqa: N802, N803, D102
+ os.rename( # noqa: PTH104
+ os.path.join(self.workdirTemplate, calDataFileName), # noqa: PTH118
+ os.path.join(self.workdirMain, calDataFileName), # noqa: PTH118
)
- def createHeadings(self):
+ def createHeadings(self): # noqa: N802, D102
self.logFile.write('\n\tCreating headings')
headings = 'Exp_num interface '
- for i, edpName in enumerate(self.edpNamesList):
+ for i, edpName in enumerate(self.edpNamesList): # noqa: N806
if self.edpLengthsList[i] == 1:
headings += f'{edpName} '
else:
@@ -280,30 +280,30 @@ def createHeadings(self):
self.logFile.write(f'\n\t\tThe headings are: \n\t\t{headings}')
return headings
- def createTempCalDataFile(self, calDataFile):
- self.tempCalDataFile = os.path.join(
+ def createTempCalDataFile(self, calDataFile): # noqa: N802, N803, D102
+ self.tempCalDataFile = os.path.join( # noqa: PTH118
self.workdirMain, 'quoFEMTempCalibrationDataFile.cal'
)
- f1 = open(self.tempCalDataFile, 'w')
+ f1 = open(self.tempCalDataFile, 'w') # noqa: SIM115, PTH123
headings = self.createHeadings()
f1.write(headings)
interface = 1
self.numExperiments = 0
linenum = 0
- with open(calDataFile) as f:
+ with open(calDataFile) as f: # noqa: PTH123
for line in f:
linenum += 1
if len(line.strip()) == 0:
continue
- else:
- line = line.replace(',', ' ')
+ else: # noqa: RET507
+ line = line.replace(',', ' ') # noqa: PLW2901
# Check length of each line
words = line.split()
if len(words) == self.lineLength:
self.numExperiments += 1
- tempLine = f'{self.numExperiments} {interface} '
+ tempLine = f'{self.numExperiments} {interface} ' # noqa: N806
for w in words:
- tempLine += f'{w} '
+ tempLine += f'{w} ' # noqa: N806
self.logFile.write(
f'\n\tLine {linenum}, length {len(words)}: \n\t\t{tempLine}'
)
@@ -313,13 +313,13 @@ def createTempCalDataFile(self, calDataFile):
f"\nERROR: The number of entries ({len(words)}) in line num {linenum} of the file '{calDataFile}' "
f'does not match the expected length {self.lineLength}'
)
- raise DataProcessingError(
- f"ERROR: The number of entries ({len(words)}) in line num {linenum} of the file '{calDataFile}' "
+ raise DataProcessingError( # noqa: TRY003
+ f"ERROR: The number of entries ({len(words)}) in line num {linenum} of the file '{calDataFile}' " # noqa: EM102
f'does not match the expected length {self.lineLength}'
)
f1.close()
- def readCleanedCalData(self):
+ def readCleanedCalData(self): # noqa: N802, D102
self.calibrationData = np.atleast_2d(
np.genfromtxt(
self.tempCalDataFile,
@@ -328,8 +328,8 @@ def readCleanedCalData(self):
)
)
- def getCalibrationData(self):
- calDataFile = os.path.join(self.workdirMain, self.calDataFileName)
+ def getCalibrationData(self): # noqa: N802, D102
+ calDataFile = os.path.join(self.workdirMain, self.calDataFileName) # noqa: PTH118, N806
self.logFile.write(
f'\nCalibration data file being processed: \n\t{calDataFile}\n'
)
@@ -338,13 +338,13 @@ def getCalibrationData(self):
return self.calibrationData, self.numExperiments
-def transform_data_function(
+def transform_data_function( # noqa: D103
data_to_transform: np.ndarray,
- list_of_data_segment_lengths: list[int],
- list_of_scale_factors: list[float],
- list_of_shift_factors: list[float],
+ list_of_data_segment_lengths: list[int], # noqa: FA102
+ list_of_scale_factors: list[float], # noqa: FA102
+ list_of_shift_factors: list[float], # noqa: FA102
):
- currentPosition = 0
+ currentPosition = 0 # noqa: N806
for j in range(len(list_of_data_segment_lengths)):
slice_of_data = data_to_transform[
:,
@@ -355,18 +355,18 @@ def transform_data_function(
:,
currentPosition : currentPosition + list_of_data_segment_lengths[j],
] = slice_of_data / list_of_scale_factors[j]
- currentPosition += list_of_data_segment_lengths[j]
+ currentPosition += list_of_data_segment_lengths[j] # noqa: N806
return data_to_transform
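
A short usage sketch of the transform above, assuming transform_data_function from this file is in scope: each response segment is shifted, then divided by its scale factor (segment lengths and factors below are made up):

import numpy as np

data = np.array([[1.0, 2.0, 30.0],
                 [3.0, 4.0, 50.0]])
segments = [2, 1]        # two response quantities
scales = [2.0, 10.0]
shifts = [0.0, -40.0]
out = transform_data_function(data.copy(), segments, scales, shifts)
# columns 0-1 are halved; column 2 becomes (x - 40) / 10
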
-class DataTransformer:
- def __init__(self, transformStrategy: str, logFile: TextIO) -> None:
+class DataTransformer: # noqa: D101
+ def __init__(self, transformStrategy: str, logFile: TextIO) -> None: # noqa: N803
self.logFile = logFile
self.transformStrategyList = ['absMaxScaling', 'standardize']
if transformStrategy not in self.transformStrategyList:
string = ' or '.join(self.transformStrategyList)
- raise ValueError(f'transform strategy must be one of {string}')
- else:
+ raise ValueError(f'transform strategy must be one of {string}') # noqa: EM102, TRY003
+ else: # noqa: RET506
self.transformStrategy = transformStrategy
logFile.write(
@@ -376,18 +376,18 @@ def __init__(self, transformStrategy: str, logFile: TextIO) -> None:
'prediction) and \nthen scaled (the data and prediction will be divided by a positive scalar value).'
)
- def computeScaleAndShiftFactors(
+ def computeScaleAndShiftFactors( # noqa: N802, D102
self,
- calibrationData: np.ndarray,
- edpLengthsList: list[int],
+ calibrationData: np.ndarray, # noqa: N803
+ edpLengthsList: list[int], # noqa: FA102, N803
):
self.calibrationData = calibrationData
self.edpLengthsList = edpLengthsList
- shiftFactors = []
- scaleFactors = []
- currentPosition = 0
- locShift = 0.0
+ shiftFactors = [] # noqa: N806
+ scaleFactors = [] # noqa: N806
+ currentPosition = 0 # noqa: N806
+ locShift = 0.0 # noqa: N806
if self.transformStrategy == 'absMaxScaling':
# Compute the scale factors - absolute maximum of the data for each response variable
self.logFile.write(
@@ -398,17 +398,17 @@ def computeScaleAndShiftFactors(
'\n\tthen the scale factor is set to 1.0, and the shift factor is set to 1.0.'
)
for j in range(len(self.edpLengthsList)):
- calibrationDataSlice = calibrationData[
+ calibrationDataSlice = calibrationData[ # noqa: N806
:,
currentPosition : currentPosition + self.edpLengthsList[j],
]
- absMax = np.absolute(np.max(calibrationDataSlice))
+ absMax = np.absolute(np.max(calibrationDataSlice)) # noqa: N806
if absMax == 0: # This is to handle the case if abs max of data = 0.
- locShift = 1.0
- absMax = 1.0
+ locShift = 1.0 # noqa: N806
+ absMax = 1.0 # noqa: N806
shiftFactors.append(locShift)
scaleFactors.append(absMax)
- currentPosition += self.edpLengthsList[j]
+ currentPosition += self.edpLengthsList[j] # noqa: N806
else:
self.logFile.write(
'\n\nComputing scale and shift factors. '
@@ -418,23 +418,23 @@ def computeScaleAndShiftFactors(
'\n\tthen the scale factor is set to 1.0.'
)
for j in range(len(self.edpLengthsList)):
- calibrationDataSlice = calibrationData[
+ calibrationDataSlice = calibrationData[ # noqa: N806
:,
currentPosition : currentPosition + self.edpLengthsList[j],
]
- meanValue = np.nanmean(calibrationDataSlice)
- stdValue = np.nanstd(calibrationDataSlice)
+ meanValue = np.nanmean(calibrationDataSlice) # noqa: N806
+ stdValue = np.nanstd(calibrationDataSlice) # noqa: N806
if stdValue == 0: # This is to handle the case if stdev of data = 0.
- stdValue = 1.0
+ stdValue = 1.0 # noqa: N806
scaleFactors.append(stdValue)
shiftFactors.append(-meanValue)
- currentPosition += self.edpLengthsList[j]
+ currentPosition += self.edpLengthsList[j] # noqa: N806
self.scaleFactors = scaleFactors
self.shiftFactors = shiftFactors
return scaleFactors, shiftFactors
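
Condensed, the 'standardize' branch above computes per-segment factors so that (data + shift) / scale is roughly zero-mean, unit-variance; a sketch keeping the zero-std guard:

import numpy as np

def standardize_factors(data, edp_lengths):
    scales, shifts, pos = [], [], 0
    for n in edp_lengths:
        block = data[:, pos:pos + n]
        std = np.nanstd(block)
        scales.append(std if std != 0 else 1.0)  # guard constant responses
        shifts.append(-np.nanmean(block))
        pos += n
    return scales, shifts
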
- def transformData(self):
+ def transformData(self): # noqa: N802, D102
return transform_data_function(
self.calibrationData,
self.edpLengthsList,
@@ -443,24 +443,24 @@ def transformData(self):
)
-def createLogFile(where: str, logfile_name: str):
- logfile = open(os.path.join(where, logfile_name), 'w')
+def createLogFile(where: str, logfile_name: str): # noqa: N802, D103
+ logfile = open(os.path.join(where, logfile_name), 'w') # noqa: SIM115, PTH118, PTH123
logfile.write(
'Starting analysis at: {}'.format(
time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())
)
)
logfile.write("\nRunning quoFEM's UCSD_UQ engine workflow")
- logfile.write('\nCWD: {}'.format(os.path.abspath('.')))
+ logfile.write('\nCWD: {}'.format(os.path.abspath('.'))) # noqa: PTH100
return logfile
-def syncLogFile(logFile: TextIO):
+def syncLogFile(logFile: TextIO): # noqa: N802, N803, D103
logFile.flush()
os.fsync(logFile.fileno())
-def make_distributions(variables):
+def make_distributions(variables): # noqa: C901, D103
all_distributions_list = []
for i in range(len(variables['names'])):
@@ -582,14 +582,14 @@ def make_distributions(variables):
return all_distributions_list
-class LogLikelihoodHandler:
+class LogLikelihoodHandler: # noqa: D101
def __init__(
self,
data: NDArray,
- covariance_matrix_blocks_list: list[NDArray],
- list_of_data_segment_lengths: list[int],
- list_of_scale_factors: list[float],
- list_of_shift_factors: list[float],
+ covariance_matrix_blocks_list: list[NDArray], # noqa: FA102
+ list_of_data_segment_lengths: list[int], # noqa: FA102
+ list_of_scale_factors: list[float], # noqa: FA102
+ list_of_shift_factors: list[float], # noqa: FA102
workdir_main,
full_path_to_tmcmc_code_directory: str,
log_likelihood_file_name: str = '',
@@ -613,16 +613,16 @@ def _copy_log_likelihood_module(self):
len(self.log_likelihood_file_name) == 0
): # if the log-likelihood file is an empty string
self.log_likelihood_file_name = 'defaultLogLikeScript.py'
- src = os.path.join(
+ src = os.path.join( # noqa: PTH118
self.full_path_to_tmcmc_code_directory,
self.log_likelihood_file_name,
)
- dst = os.path.join(self.workdir_main, self.log_likelihood_file_name)
+ dst = os.path.join(self.workdir_main, self.log_likelihood_file_name) # noqa: PTH118
try:
shutil.copyfile(src, dst)
- except Exception:
+ except Exception: # noqa: BLE001
msg = f"ERROR: The log-likelihood script '{src}' cannot be copied to '{dst}'."
- raise Exception(msg)
+ raise Exception(msg) # noqa: B904, TRY002
def _get_num_experiments(self) -> int:
return np.shape(self.data)[0]
@@ -635,13 +635,13 @@ def _import_log_likelihood_module(
) -> Callable:
try:
module = import_module(log_likelihood_module_name)
- except:
- msg = f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(self.workdir_main, self.log_likelihood_file_name)}' cannot be imported."
- raise ImportError(msg)
+ except: # noqa: E722
+ msg = f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(self.workdir_main, self.log_likelihood_file_name)}' cannot be imported." # noqa: PTH118
+ raise ImportError(msg) # noqa: B904
return module # type: ignore
- def get_log_likelihood_function(self) -> Callable:
- log_likelihood_module_name = os.path.splitext(self.log_likelihood_file_name)[
+ def get_log_likelihood_function(self) -> Callable: # noqa: D102
+ log_likelihood_module_name = os.path.splitext(self.log_likelihood_file_name)[ # noqa: PTH122
0
]
module = self._import_log_likelihood_module(log_likelihood_module_name)
@@ -677,16 +677,16 @@ def _loop_for_log_likelihood(
list_of_covariance_multipliers,
):
transformed_prediction = self._transform_prediction(prediction)
- allResiduals = self._compute_residuals(transformed_prediction)
+ allResiduals = self._compute_residuals(transformed_prediction) # noqa: N806
loglike = 0
for i in range(self.num_experiments):
- currentPosition = 0
+ currentPosition = 0 # noqa: N806
for j in range(self.num_response_quantities):
length = self.list_of_data_segment_lengths[j]
residuals = allResiduals[
i, currentPosition : currentPosition + length
]
- currentPosition = currentPosition + length
+ currentPosition = currentPosition + length # noqa: N806
cov = self._make_covariance(j, list_of_covariance_multipliers[j])
mean = self._make_mean(j)
ll = self.log_likelihood_function(residuals, mean, cov)
@@ -696,10 +696,10 @@ def _loop_for_log_likelihood(
loglike += -np.inf
return loglike
- def evaluate_log_likelihood(
+ def evaluate_log_likelihood( # noqa: D102
self,
prediction: NDArray,
- list_of_covariance_multipliers: list[float],
+ list_of_covariance_multipliers: list[float], # noqa: FA102
) -> float:
return self._loop_for_log_likelihood(
prediction=prediction,
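For reference, the scale-and-shift logic touched in the hunks above reduces to the short standalone sketch below. The bare-function form and name are illustrative; in the source this is a method of the calibration-data class that also writes each factor to the log file.

```python
import numpy as np

def compute_scale_and_shift(calibration_data, edp_lengths_list):
    """Per-response standardization factors, mirroring computeScaleAndShiftFactors."""
    scale_factors, shift_factors = [], []
    current_position = 0
    for length in edp_lengths_list:
        data_slice = calibration_data[:, current_position:current_position + length]
        std_value = np.nanstd(data_slice)
        if std_value == 0:  # constant data: fall back to a scale of 1.0
            std_value = 1.0
        scale_factors.append(std_value)
        shift_factors.append(-np.nanmean(data_slice))
        current_position += length
    return scale_factors, shift_factors
```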
diff --git a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py
index bb9e62092..cd147d466 100644
--- a/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py
+++ b/modules/performUQ/UCSD_UQ/defaultLogLikeScript.py
@@ -1,4 +1,4 @@
-import numpy as np
+import numpy as np # noqa: INP001, D100
class CovError(Exception):
@@ -15,15 +15,15 @@ def __init__(self, message):
def log_likelihood(
- calibrationData,
+ calibrationData, # noqa: N803
prediction,
- numExperiments,
- covarianceMatrixList,
- edpNamesList,
- edpLengthsList,
- covarianceMultiplierList,
- scaleFactors,
- shiftFactors,
+ numExperiments, # noqa: N803
+ covarianceMatrixList, # noqa: N803
+ edpNamesList, # noqa: ARG001, N803
+ edpLengthsList, # noqa: N803
+ covarianceMultiplierList, # noqa: N803
+ scaleFactors, # noqa: N803
+ shiftFactors, # noqa: N803
):
"""Compute the log-likelihood
@@ -67,21 +67,21 @@ def log_likelihood(
distribution and a user-supplied covariance structure. Block-diagonal covariance structures are supported. The multiplier
on the covariance block corresponding to each response quantity is also calibrated.
:rtype: float
- """
+ """ # noqa: D400
# Check if the correct number of covariance terms has been passed in
- numResponses = len(edpLengthsList)
+ numResponses = len(edpLengthsList) # noqa: N806
if len(covarianceMatrixList) != numExperiments * numResponses:
- print(
+ print( # noqa: T201
f'ERROR: The expected number of covariance matrices is {numExperiments * numResponses}, but only {len(covarianceMatrixList)} were passed '
'in.'
)
- raise CovError(
- f'ERROR: The expected number of covariance matrices is {numExperiments * numResponses}, but only {len(covarianceMatrixList)} were passed '
+ raise CovError( # noqa: TRY003
+ f'ERROR: The expected number of covariance matrices is {numExperiments * numResponses}, but only {len(covarianceMatrixList)} were passed ' # noqa: EM102
'in.'
)
# Shift and normalize the prediction
- currentPosition = 0
+ currentPosition = 0 # noqa: N806
for j in range(len(edpLengthsList)):
prediction[:, currentPosition : currentPosition + edpLengthsList[j]] = (
prediction[:, currentPosition : currentPosition + edpLengthsList[j]]
@@ -91,25 +91,25 @@ def log_likelihood(
prediction[:, currentPosition : currentPosition + edpLengthsList[j]]
/ scaleFactors[j]
)
- currentPosition = currentPosition + edpLengthsList[j]
+ currentPosition = currentPosition + edpLengthsList[j] # noqa: N806
# Compute the normalized residuals
- allResiduals = prediction - calibrationData
+ allResiduals = prediction - calibrationData # noqa: N806
# Loop over the normalized residuals to compute the log-likelihood
loglike = 0
- covListIndex = 0
+ covListIndex = 0 # noqa: N806
for i in range(numExperiments):
- currentPosition = 0
+ currentPosition = 0 # noqa: N806
for j in range(numResponses):
# Get the residuals corresponding to this response variable
length = edpLengthsList[j]
residuals = allResiduals[i, currentPosition : currentPosition + length]
- currentPosition = currentPosition + length
+ currentPosition = currentPosition + length # noqa: N806
# Get the covariance matrix corresponding to this response variable
cov = np.atleast_2d(covarianceMatrixList[covListIndex])
- covListIndex = covListIndex + 1
+ covListIndex = covListIndex + 1 # noqa: N806
# Multiply the covariance matrix by the value of the covariance multiplier
cov = cov * covarianceMultiplierList[j]
@@ -132,11 +132,11 @@ def log_likelihood(
# Mahalanobis distance]
# = -1/2*[t1 + t2 + t3]
t1 = length * np.log(2 * np.pi)
- eigenValues, eigenVectors = np.linalg.eigh(cov)
+ eigenValues, eigenVectors = np.linalg.eigh(cov) # noqa: N806
logdet = np.sum(np.log(eigenValues))
- eigenValuesReciprocal = 1.0 / eigenValues
+ eigenValuesReciprocal = 1.0 / eigenValues # noqa: N806
z = eigenVectors * np.sqrt(eigenValuesReciprocal)
- mahalanobisDistance = np.square(np.dot(residuals, z)).sum()
+ mahalanobisDistance = np.square(np.dot(residuals, z)).sum() # noqa: N806
ll = -0.5 * (t1 + logdet + mahalanobisDistance)
if not np.isnan(ll):
loglike += ll
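The t1/logdet/Mahalanobis arithmetic in the hunk above is the standard eigendecomposition evaluation of a zero-mean multivariate normal log-density. A minimal, self-contained sketch of the same computation:

```python
import numpy as np

def gaussian_loglike(residuals, cov):
    """log N(residuals | 0, cov) via np.linalg.eigh, as in defaultLogLikeScript.

    Assumes cov is symmetric positive definite, so every eigenvalue is > 0.
    """
    t1 = len(residuals) * np.log(2 * np.pi)        # normalization term
    eigenvalues, eigenvectors = np.linalg.eigh(cov)
    logdet = np.sum(np.log(eigenvalues))           # log|cov|
    z = eigenvectors * np.sqrt(1.0 / eigenvalues)  # whitening: z @ z.T == inv(cov)
    mahalanobis = np.square(residuals @ z).sum()   # r^T inv(cov) r
    return -0.5 * (t1 + logdet + mahalanobis)
```

A Cholesky solve would compute the same quantities; the eigendecomposition form also yields the log-determinant directly as the sum of log-eigenvalues.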
diff --git a/modules/performUQ/UCSD_UQ/loglike_script.py b/modules/performUQ/UCSD_UQ/loglike_script.py
index 44c30add8..46da3581f 100644
--- a/modules/performUQ/UCSD_UQ/loglike_script.py
+++ b/modules/performUQ/UCSD_UQ/loglike_script.py
@@ -1,4 +1,4 @@
-# from scipy.stats import multivariate_normal
+# from scipy.stats import multivariate_normal # noqa: INP001, D100
# def log_likelihood(residuals, mean, cov):
# return multivariate_normal.logpdf(residuals, mean=mean, cov=cov)
@@ -6,7 +6,7 @@
import numpy as np
-def log_likelihood(residuals, mean, cov):
+def log_likelihood(residuals, mean, cov): # noqa: ARG001, D103
length = len(residuals)
if np.shape(cov)[0] == np.shape(cov)[1] == 1:
# If there is a single variance value that is constant for all residual terms, then this is the case of
@@ -26,11 +26,11 @@ def log_likelihood(residuals, mean, cov):
# Mahalanobis distance]
# = -1/2*[t1 + t2 + t3]
t1 = length * np.log(2 * np.pi)
- eigenValues, eigenVectors = np.linalg.eigh(cov)
+ eigenValues, eigenVectors = np.linalg.eigh(cov) # noqa: N806
logdet = np.sum(np.log(eigenValues))
- eigenValuesReciprocal = 1.0 / eigenValues
+ eigenValuesReciprocal = 1.0 / eigenValues # noqa: N806
z = eigenVectors * np.sqrt(eigenValuesReciprocal)
- mahalanobisDistance = np.square(np.dot(residuals, z)).sum()
+ mahalanobisDistance = np.square(np.dot(residuals, z)).sum() # noqa: N806
ll = -0.5 * (t1 + logdet + mahalanobisDistance)
return ll
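The body of the 1x1-covariance branch falls outside the hunks shown, but for i.i.d. zero-mean residuals with a single scalar variance the log-likelihood has the closed form below. This is a sketch under that assumption, not a copy of the elided code.

```python
import numpy as np

def iid_gaussian_loglike(residuals, variance):
    """Fast path when cov is a single scalar variance (assumed reconstruction)."""
    n = len(residuals)
    return -0.5 * (n * np.log(2 * np.pi)
                   + n * np.log(variance)
                   + np.sum(np.square(residuals)) / variance)
```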
diff --git a/modules/performUQ/UCSD_UQ/mainscript.py b/modules/performUQ/UCSD_UQ/mainscript.py
index 48f8924d2..edaceef17 100644
--- a/modules/performUQ/UCSD_UQ/mainscript.py
+++ b/modules/performUQ/UCSD_UQ/mainscript.py
@@ -1,7 +1,7 @@
"""authors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, Prof. J.P. Conte, Aakash Bangalore Satish*
affiliation: University of California, San Diego, *SimCenter, University of California, Berkeley
-"""
+""" # noqa: INP001, D205, D400
# ======================================================================================================================
import json
@@ -14,7 +14,7 @@
# ======================================================================================================================
-def main(input_args):
+def main(input_args): # noqa: D103
# # Initialize analysis
# path_to_UCSD_UQ_directory = Path(input_args[2]).resolve().parent
# path_to_working_directory = Path(input_args[3]).resolve()
@@ -24,7 +24,7 @@ def main(input_args):
# input_file_name = input_args[7]
# Initialize analysis
- path_to_UCSD_UQ_directory = Path(input_args[0]).resolve().parent
+ path_to_UCSD_UQ_directory = Path(input_args[0]).resolve().parent # noqa: N806, F841
path_to_working_directory = Path(input_args[1]).resolve()
path_to_template_directory = Path(input_args[2]).resolve()
run_type = input_args[3] # either "runningLocal" or "runningRemote"
@@ -36,7 +36,7 @@ def main(input_args):
input_file_full_path = path_to_template_directory / input_file_name
- with open(input_file_full_path, encoding='utf-8') as f:
+ with open(input_file_full_path, encoding='utf-8') as f: # noqa: PTH123
inputs = json.load(f)
uq_inputs = inputs['UQ']
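Before dispatching, mainscript.main unpacks its positional arguments and reads the workflow's JSON input. A sketch of just the input-loading step visible in the hunks above (names copied from the diff, everything else elided):

```python
import json
from pathlib import Path

def load_uq_inputs(path_to_template_directory, input_file_name):
    """Read the workflow's JSON input and return its 'UQ' section (sketch)."""
    input_file_full_path = Path(path_to_template_directory) / input_file_name
    with open(input_file_full_path, encoding='utf-8') as f:
        inputs = json.load(f)
    return inputs['UQ']
```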
diff --git a/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py b/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
index a33d1bc0b..48ec64caf 100644
--- a/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
+++ b/modules/performUQ/UCSD_UQ/mainscript_hierarchical_bayesian.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import sys
from pathlib import Path
@@ -9,11 +9,11 @@
path_to_common_uq = Path(__file__).parent.parent / 'common'
sys.path.append(str(path_to_common_uq))
-import mwg_sampler
-import uq_utilities
+import mwg_sampler # noqa: E402
+import uq_utilities # noqa: E402
-def generate_initial_states(
+def generate_initial_states( # noqa: D103
num_edp,
num_rv,
num_datasets,
@@ -58,7 +58,7 @@ def generate_initial_states(
)
-def loglikelihood_function(residual, error_variance_sample):
+def loglikelihood_function(residual, error_variance_sample): # noqa: D103
mean = 0
var = error_variance_sample
standard_deviation = np.sqrt(var)
@@ -68,17 +68,17 @@ def loglikelihood_function(residual, error_variance_sample):
return ll
-def main(input_args):
+def main(input_args): # noqa: D103
# Initialize analysis
working_directory = Path(input_args[0]).resolve()
- template_directory = Path(input_args[1]).resolve()
+ template_directory = Path(input_args[1]).resolve() # noqa: F841
run_type = input_args[2] # either "runningLocal" or "runningRemote"
- workflow_driver = input_args[3]
+ workflow_driver = input_args[3] # noqa: F841
input_file = input_args[4]
# input_file_full_path = template_directory / input_file
- with open(input_file, encoding='utf-8') as f:
+ with open(input_file, encoding='utf-8') as f: # noqa: PTH123
inputs = json.load(f)
uq_inputs = inputs['UQ']
@@ -124,12 +124,12 @@ def main(input_args):
restart_file,
)
- # TODO: get_initial_states():
+ # TODO: get_initial_states(): # noqa: TD002
# either:
# read them from file or
# use LHS to explore the space and find the best starting points out of
# those sampled values for the different chains
- # TODO: get_initial_proposal_covariance_matrix():
+ # TODO: get_initial_proposal_covariance_matrix(): # noqa: TD002
# either:
# read them from file or
# adaptively tune the proposal covariance matrix by running the chain for
@@ -237,7 +237,7 @@ def main(input_args):
list_of_initial_states_of_error_variance_per_dataset
)
- with open(results_directory_path / 'sample_0.json', 'w', encoding='utf-8') as f:
+ with open(results_directory_path / 'sample_0.json', 'w', encoding='utf-8') as f: # noqa: PTH123
json.dump(results_to_write, f, indent=4)
adaptivity_results = {}
@@ -247,16 +247,16 @@ def main(input_args):
adaptivity_results['proposal_scale_list'] = proposal_scale_list
cov_kernels_list = []
for cov_kernel in list_of_proposal_covariance_kernels:
- cov_kernels_list.append(cov_kernel.tolist())
+ cov_kernels_list.append(cov_kernel.tolist()) # noqa: PERF401
adaptivity_results['list_of_proposal_covariance_kernels'] = cov_kernels_list
- with open(
+ with open( # noqa: PTH123
results_directory_path.parent / f'adaptivity_results_{0}.json',
'w',
encoding='utf-8',
) as f:
json.dump(adaptivity_results, f, indent=4)
- samples = mwg_sampler.metropolis_within_gibbs_sampler(
+ samples = mwg_sampler.metropolis_within_gibbs_sampler( # noqa: F841
uq_inputs,
parallel_evaluation_function,
function_to_evaluate,
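In loglikelihood_function above, the lines between the standard-deviation computation and `return ll` are elided by the hunk. A plausible completion, assumed rather than copied, sums independent zero-mean normal log-densities over the residual vector:

```python
import numpy as np
from scipy import stats

def loglikelihood_function(residual, error_variance_sample):
    """Same signature as in mainscript_hierarchical_bayesian; the elided body is
    assumed to sum independent zero-mean normal log-densities."""
    standard_deviation = np.sqrt(error_variance_sample)
    return np.sum(stats.norm.logpdf(residual, loc=0, scale=standard_deviation))
```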
diff --git a/modules/performUQ/UCSD_UQ/mainscript_tmcmc.py b/modules/performUQ/UCSD_UQ/mainscript_tmcmc.py
index ebcd66ea6..47d647750 100644
--- a/modules/performUQ/UCSD_UQ/mainscript_tmcmc.py
+++ b/modules/performUQ/UCSD_UQ/mainscript_tmcmc.py
@@ -1,7 +1,7 @@
"""authors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, Prof. J.P. Conte, Aakash Bangalore Satish*
affiliation: University of California, San Diego, *SimCenter, University of California, Berkeley
-"""
+""" # noqa: INP001, D205, D400
# ======================================================================================================================
import os
@@ -25,14 +25,14 @@
# ======================================================================================================================
-def computeModelPosteriorProbabilities(modelPriorProbabilities, modelEvidences):
+def computeModelPosteriorProbabilities(modelPriorProbabilities, modelEvidences): # noqa: N802, N803, D103
denominator = np.dot(modelPriorProbabilities, modelEvidences)
return modelPriorProbabilities * modelEvidences / denominator
-def computeModelPosteriorProbabilitiesUsingLogEvidences(
- modelPriorProbabilities,
- modelLogEvidences,
+def computeModelPosteriorProbabilitiesUsingLogEvidences( # noqa: N802, D103
+ modelPriorProbabilities, # noqa: N803
+ modelLogEvidences, # noqa: N803
):
deltas = modelLogEvidences - np.min(modelLogEvidences)
denominator = np.dot(modelPriorProbabilities, np.exp(deltas))
@@ -42,15 +42,15 @@ def computeModelPosteriorProbabilitiesUsingLogEvidences(
# ======================================================================================================================
-class TMCMC_Data:
+class TMCMC_Data: # noqa: D101
def __init__(
self,
- mainscriptPath: str,
- workdirMain: str,
- runType: str,
- workflowDriver: str,
- logFile: TextIO,
- numBurnInSteps: int = 10,
+ mainscriptPath: str, # noqa: N803
+ workdirMain: str, # noqa: N803
+ runType: str, # noqa: N803
+ workflowDriver: str, # noqa: N803
+ logFile: TextIO, # noqa: N803
+ numBurnInSteps: int = 10, # noqa: N803
) -> None:
self.mainscriptPath = mainscriptPath
self.workdirMain = workdirMain
@@ -65,18 +65,18 @@ def __init__(
self.numBurnInSteps = numBurnInSteps
self.numSkipSteps = 1
- def getMPI_size(self):
+ def getMPI_size(self): # noqa: N802, D102
if self.runType == 'runningRemote':
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.MPI_size = self.comm.Get_size()
- def updateUQInfo(self, numberOfSamples, seedVal):
+ def updateUQInfo(self, numberOfSamples, seedVal): # noqa: N802, N803, D102
self.numberOfSamples = numberOfSamples
self.seedVal = seedVal
- def findNumProcessorsAvailable(self):
+ def findNumProcessorsAvailable(self): # noqa: N802, D102
if self.runType == 'runningLocal':
import multiprocessing as mp
@@ -89,7 +89,7 @@ def findNumProcessorsAvailable(self):
else:
self.numProcessors = 1
- def getNumChains(self, numberOfSamples, runType, numProcessors):
+ def getNumChains(self, numberOfSamples, runType, numProcessors): # noqa: N802, N803, D102
if runType == 'runningLocal':
self.numChains = int(min(numProcessors, self.recommendedNumChains))
elif runType == 'runningRemote':
@@ -99,7 +99,7 @@ def getNumChains(self, numberOfSamples, runType, numProcessors):
self.numChains = max(self.numChains, numberOfSamples)
- def getNumStepsPerChainAfterBurnIn(self, numParticles, numChains):
+ def getNumStepsPerChainAfterBurnIn(self, numParticles, numChains): # noqa: N802, N803, D102
self.numStepsAfterBurnIn = (
int(np.ceil(numParticles / numChains)) * self.numSkipSteps
)
@@ -110,7 +110,7 @@ def getNumStepsPerChainAfterBurnIn(self, numParticles, numChains):
# ======================================================================================================================
-def main(input_args):
+def main(input_args): # noqa: D103
t1 = time.time()
# Initialize analysis
@@ -121,9 +121,9 @@ def main(input_args):
# driver_file = input_args[4]
# input_json_filename = input_args[5]
- mainscript_path = os.path.abspath(__file__)
- working_directory = os.path.abspath(input_args[0])
- template_directory = os.path.abspath(input_args[1])
+ mainscript_path = os.path.abspath(__file__) # noqa: PTH100
+ working_directory = os.path.abspath(input_args[0]) # noqa: PTH100
+ template_directory = os.path.abspath(input_args[1]) # noqa: PTH100
run_type = input_args[2] # either "runningLocal" or "runningRemote"
driver_file = input_args[3]
input_json_filename = input_args[4]
@@ -133,8 +133,8 @@ def main(input_args):
# Remove dakotaTab and dakotaTabPrior files if they already exist in the working directory
try:
- os.remove('dakotaTab.out')
- os.remove('dakotTabPrior.out')
+ os.remove('dakotaTab.out') # noqa: PTH107
+ os.remove('dakotaTabPrior.out') # noqa: PTH107
except OSError:
pass
@@ -160,7 +160,7 @@ def main(input_args):
input_json_filename_full_path,
logfile,
working_directory,
- os.path.dirname(mainscript_path),
+ os.path.dirname(mainscript_path), # noqa: PTH120
)
syncLogFile(logfile)
@@ -234,12 +234,12 @@ def main(input_args):
logfile,
run_type,
)
- defaultErrorVariances = cov_matrix_options_instance.getDefaultErrorVariances()
+ defaultErrorVariances = cov_matrix_options_instance.getDefaultErrorVariances() # noqa: N806, F841
covariance_matrix_list = cov_matrix_options_instance.createCovarianceMatrix()
# ======================================================================================================================
# Get log-likelihood function
- LL_Handler = LogLikelihoodHandler(
+ LL_Handler = LogLikelihoodHandler( # noqa: N806
data=transformed_calibration_data,
covariance_matrix_blocks_list=covariance_matrix_list,
list_of_data_segment_lengths=edp_lengths_list,
@@ -264,10 +264,10 @@ def main(input_args):
logfile.write(f'\n\tNumber of particles: {number_of_samples}')
# number of max MCMC steps
- number_of_MCMC_steps = (
+ number_of_MCMC_steps = ( # noqa: N806
tmcmc_data_instance.numBurnInSteps + tmcmc_data_instance.numStepsAfterBurnIn
)
- max_number_of_MCMC_steps = 10
+ max_number_of_MCMC_steps = 10 # noqa: N806
logfile.write(f'\n\tNumber of MCMC steps in first stage: {number_of_MCMC_steps}')
logfile.write(
f'\n\tMax. number of MCMC steps in any stage: {max_number_of_MCMC_steps}'
@@ -374,7 +374,7 @@ def main(input_args):
syncLogFile(logfile)
- modelPosteriorProbabilities = computeModelPosteriorProbabilities(
+ modelPosteriorProbabilities = computeModelPosteriorProbabilities( # noqa: N806
model_prior_probabilities, model_evidences
)
@@ -405,7 +405,7 @@ def main(input_args):
# ======================================================================================================================
if __name__ == '__main__':
- inputArgs = sys.argv
+ inputArgs = sys.argv # noqa: N816
main(inputArgs)
# ======================================================================================================================
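Note that computeModelPosteriorProbabilitiesUsingLogEvidences shifts the log-evidences by their minimum before exponentiating. The identity p_i * exp(l_i - c) / sum_j p_j * exp(l_j - c) holds for any constant c, but shifting by the maximum keeps every exponent at or below zero and so cannot overflow. A sketch of that variant (not the repository's code):

```python
import numpy as np

def model_posteriors_from_log_evidences(prior_probabilities, log_evidences):
    """Posterior model probabilities from log-evidences, shifted by the max so
    that every exponent is <= 0 (a hedged variant of the min-shift in the diff)."""
    deltas = np.asarray(log_evidences) - np.max(log_evidences)
    weighted = np.asarray(prior_probabilities) * np.exp(deltas)
    return weighted / np.sum(weighted)
```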
diff --git a/modules/performUQ/UCSD_UQ/mwg_sampler.py b/modules/performUQ/UCSD_UQ/mwg_sampler.py
index 64bbdba2d..546fb23d2 100644
--- a/modules/performUQ/UCSD_UQ/mwg_sampler.py
+++ b/modules/performUQ/UCSD_UQ/mwg_sampler.py
@@ -1,14 +1,14 @@
-import json
+import json # noqa: INP001, D100
from pathlib import Path
import numpy as np
import scipy
path_to_common_uq = Path(__file__).parent.parent / 'common'
-import sys
+import sys # noqa: E402
sys.path.append(str(path_to_common_uq))
-import uq_utilities
+import uq_utilities # noqa: E402
def _update_parameters_of_normal_inverse_wishart_distribution(
@@ -43,7 +43,7 @@ def _update_parameters_of_inverse_gamma_distribution(
return alpha_n, beta_n
-def _draw_one_sample(
+def _draw_one_sample( # noqa: PLR0913
sample_number,
random_state,
num_rv,
@@ -269,24 +269,24 @@ def _draw_one_sample(
list_of_strings_to_write.append(f'{dataset_number + 1}')
x_string_list = []
for x_val in x:
- x_string_list.append(f'{x_val}')
+ x_string_list.append(f'{x_val}') # noqa: PERF401
list_of_strings_to_write.append('\t'.join(x_string_list))
y_string_list = []
for y_val in y:
- y_string_list.append(f'{y_val}')
+ y_string_list.append(f'{y_val}') # noqa: PERF401
list_of_strings_to_write.append('\t'.join(y_string_list))
tabular_results_file_name = (
- uq_utilities._get_tabular_results_file_name_for_dataset(
+ uq_utilities._get_tabular_results_file_name_for_dataset( # noqa: SLF001
tabular_results_file_base_name, dataset_number
)
)
string_to_write = '\t'.join(list_of_strings_to_write) + '\n'
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
tabular_results_file_name, string_to_write
)
- with open(results_directory_path / f'sample_{sample_number + 1}.json', 'w') as f:
+ with open(results_directory_path / f'sample_{sample_number + 1}.json', 'w') as f: # noqa: PTH123
json.dump(results_to_write, f, indent=4)
return one_sample, results_to_write
@@ -303,13 +303,13 @@ def _get_tabular_results_file_name_for_hyperparameters(
tabular_results_parent
/ f'{tabular_results_stem}_hyperparameters{tabular_results_extension}'
)
- return tabular_results_file
+ return tabular_results_file # noqa: RET504
-def get_states_from_samples_list(samples_list, dataset_number):
+def get_states_from_samples_list(samples_list, dataset_number): # noqa: D103
sample_values = []
for sample_number in range(len(samples_list)):
- sample_values.append(
+ sample_values.append( # noqa: PERF401
samples_list[sample_number]['new_states'][dataset_number].flatten()
)
return sample_values
@@ -326,23 +326,23 @@ def tune(scale, acc_rate):
>0.5 x 2
>0.75 x 10
>0.95 x 100
- """
- if acc_rate < 0.01:
+ """ # noqa: D205, D400
+ if acc_rate < 0.01: # noqa: PLR2004
return scale * 0.01
- elif acc_rate < 0.05:
+ elif acc_rate < 0.05: # noqa: RET505, PLR2004
return scale * 0.1
- elif acc_rate < 0.2:
+ elif acc_rate < 0.2: # noqa: PLR2004
return scale * 0.5
- elif acc_rate > 0.95:
+ elif acc_rate > 0.95: # noqa: PLR2004
return scale * 100.0
- elif acc_rate > 0.75:
+ elif acc_rate > 0.75: # noqa: PLR2004
return scale * 10.0
- elif acc_rate > 0.5:
+ elif acc_rate > 0.5: # noqa: PLR2004
return scale * 2
return scale
-def metropolis_within_gibbs_sampler(
+def metropolis_within_gibbs_sampler( # noqa: C901, D103, PLR0913
uq_inputs,
parallel_evaluation_function,
function_to_evaluate,
@@ -367,7 +367,7 @@ def metropolis_within_gibbs_sampler(
current_mean_sample,
current_covariance_sample,
list_of_current_error_variance_samples_scaled,
- parent_distribution,
+ parent_distribution, # noqa: ARG001
num_accepts_list,
proposal_scale_list,
list_of_proposal_covariance_kernels,
@@ -387,24 +387,24 @@ def metropolis_within_gibbs_sampler(
)[0]
)
- initial_list_of_proposal_covariance_kernels = list_of_proposal_covariance_kernels
+ initial_list_of_proposal_covariance_kernels = list_of_proposal_covariance_kernels # noqa: F841
for dataset_number in range(num_datasets):
tabular_results_file_name = (
- uq_utilities._get_tabular_results_file_name_for_dataset(
+ uq_utilities._get_tabular_results_file_name_for_dataset( # noqa: SLF001
tabular_results_file_base_name, dataset_number
)
)
rv_string_list = []
for rv in rv_inputs:
- rv_string_list.append(rv['name'])
+ rv_string_list.append(rv['name']) # noqa: PERF401
error_var_string_list = []
edp_string_list = []
edp = edp_inputs[dataset_number]
error_var_string_list.append(f'{edp["name"]}.PredictionErrorVariance')
edp_components_list = []
for edp_component in range(edp['length']):
- edp_components_list.append(f'{edp["name"]}_{edp_component + 1}')
+ edp_components_list.append(f'{edp["name"]}_{edp_component + 1}') # noqa: PERF401
edp_string_list.append('\t'.join(edp_components_list))
list_of_header_strings = []
@@ -415,7 +415,7 @@ def metropolis_within_gibbs_sampler(
list_of_header_strings.append('\t'.join(edp_string_list))
string_to_write = '\t'.join(list_of_header_strings) + '\n'
tabular_results_file_name.touch()
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
tabular_results_file_name, string_to_write
)
@@ -431,7 +431,7 @@ def metropolis_within_gibbs_sampler(
rv_covariance_string_list = []
for i in range(len(rv_names_list)):
for j in range(i, len(rv_names_list)):
- rv_covariance_string_list.append(
+ rv_covariance_string_list.append( # noqa: PERF401
f'cov_{rv_names_list[i]}_{rv_names_list[j]}'
)
list_of_hyperparameter_header_strings.append(
@@ -446,7 +446,7 @@ def metropolis_within_gibbs_sampler(
)
)
hyperparameter_tabular_results_file_name.touch()
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
hyperparameter_tabular_results_file_name,
hyperparameter_header_string,
)
@@ -462,7 +462,7 @@ def metropolis_within_gibbs_sampler(
'\t'.join(list_of_predictive_distribution_sample_header_strings) + '\n'
)
tabular_results_file_base_name.touch()
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
tabular_results_file_base_name,
predictive_distribution_sample_header_string,
)
@@ -529,8 +529,8 @@ def metropolis_within_gibbs_sampler(
samples_array = np.array(states[-tuning_interval:]).T
try:
cov_kernel = np.cov(samples_array)
- except Exception as exc:
- print(
+ except Exception as exc: # noqa: BLE001
+ print( # noqa: T201
f'Sample number: {sample_number}, dataset number:'
f' {dataset_number}, Exception in covariance'
f' calculation: {exc}'
@@ -543,8 +543,8 @@ def metropolis_within_gibbs_sampler(
cholesky_of_proposal_covariance_matrix = scipy.linalg.cholesky(
proposal_covariance_matrix, lower=True
)
- except Exception as exc:
- print(
+ except Exception as exc: # noqa: BLE001
+ print( # noqa: T201
f'Sample number: {sample_number}, dataset number:'
f' {dataset_number}, Exception in cholesky'
f' calculation: {exc}'
@@ -565,11 +565,11 @@ def metropolis_within_gibbs_sampler(
adaptivity_results['proposal_scale_list'] = proposal_scale_list
cov_kernels_list = []
for cov_kernel in list_of_proposal_covariance_kernels:
- cov_kernels_list.append(cov_kernel.tolist())
+ cov_kernels_list.append(cov_kernel.tolist()) # noqa: PERF401
adaptivity_results['list_of_proposal_covariance_kernels'] = (
cov_kernels_list
)
- with open(
+ with open( # noqa: PTH123
results_directory_path.parent
/ f'adaptivity_results_{sample_number}.json',
'w',
@@ -579,12 +579,12 @@ def metropolis_within_gibbs_sampler(
hyper_mean_string_list = []
hyper_mean = current_mean_sample
for val in hyper_mean:
- hyper_mean_string_list.append(f'{val}')
+ hyper_mean_string_list.append(f'{val}') # noqa: PERF401
hyper_covariance_string_list = []
hyper_covariance = current_covariance_sample
for i in range(len(rv_names_list)):
for j in range(i, len(rv_names_list)):
- hyper_covariance_string_list.append(f'{hyper_covariance[i][j]}')
+ hyper_covariance_string_list.append(f'{hyper_covariance[i][j]}') # noqa: PERF401
list_of_hyperparameter_value_strings = []
list_of_hyperparameter_value_strings.append(f'{sample_number + 1}')
list_of_hyperparameter_value_strings.append('0')
@@ -597,7 +597,7 @@ def metropolis_within_gibbs_sampler(
hyperparameter_value_string = (
'\t'.join(list_of_hyperparameter_value_strings) + '\n'
)
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
hyperparameter_tabular_results_file_name,
hyperparameter_value_string,
)
@@ -615,7 +615,7 @@ def metropolis_within_gibbs_sampler(
- n_samples_for_mean_of_updated_predictive_distribution_parameters,
num_samples,
):
- with open(results_directory_path / f'sample_{i + 1}.json') as f:
+ with open(results_directory_path / f'sample_{i + 1}.json') as f: # noqa: PTH123
data = json.load(f)
updated_parameters = data[
'updated_parameters_of_normal_inverse_wishart_distribution'
@@ -630,7 +630,7 @@ def metropolis_within_gibbs_sampler(
nu_n_mean = np.mean(np.array(nu_n), axis=0)
psi_n_mean = np.mean(np.array(psi_n), axis=0)
- df = nu_n_mean - num_datasets + 1
+ df = nu_n_mean - num_datasets + 1 # noqa: PD901
loc = mu_n_mean
shape = (lambda_n_mean + 1) / (lambda_n_mean * df) * psi_n_mean
predictive_distribution = scipy.stats.multivariate_t(loc=loc, shape=shape, df=df)
@@ -650,7 +650,7 @@ def metropolis_within_gibbs_sampler(
)
predictive_distribution_sample_values_list = []
for val in sample_from_predictive_distribution:
- predictive_distribution_sample_values_list.append(f'{val}')
+ predictive_distribution_sample_values_list.append(f'{val}') # noqa: PERF401
list_of_predictive_distribution_sample_value_strings = []
list_of_predictive_distribution_sample_value_strings.append(
f'{sample_number + 1}'
@@ -662,7 +662,7 @@ def metropolis_within_gibbs_sampler(
predictive_distribution_sample_value_string = (
'\t'.join(list_of_predictive_distribution_sample_value_strings) + '\n'
)
- uq_utilities._write_to_tabular_results_file(
+ uq_utilities._write_to_tabular_results_file( # noqa: SLF001
tabular_results_file_base_name,
predictive_distribution_sample_value_string,
)
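The closing lines of metropolis_within_gibbs_sampler average the updated Normal-Inverse-Wishart hyperparameters and build a multivariate-t posterior predictive. A compact sketch of that construction, with parameter names taken from the diff (the textbook predictive uses the parameter dimension where this code passes num_datasets; the sketch mirrors the code as shown):

```python
import scipy.stats

def posterior_predictive(mu_n, lambda_n, nu_n, psi_n, num_datasets):
    """Multivariate-t predictive built exactly as at the end of the sampler."""
    df = nu_n - num_datasets + 1
    shape = (lambda_n + 1) / (lambda_n * df) * psi_n
    return scipy.stats.multivariate_t(loc=mu_n, shape=shape, df=df)
```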
diff --git a/modules/performUQ/UCSD_UQ/parseData.py b/modules/performUQ/UCSD_UQ/parseData.py
index a100bfac9..763926aef 100644
--- a/modules/performUQ/UCSD_UQ/parseData.py
+++ b/modules/performUQ/UCSD_UQ/parseData.py
@@ -1,7 +1,7 @@
"""authors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, Prof. J.P. Conte, and Aakash Bangalore Satish*
affiliation: University of California, San Diego, *SimCenter, University of California, Berkeley
-"""
+""" # noqa: INP001, D205, D400
import itertools
import json
@@ -23,20 +23,20 @@ def __init__(self, message):
self.message = message
-def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
+def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir): # noqa: C901, N802, N803, D103, PLR0915
# Read in the json object
logFile.write('\n\tReading the json file')
- with open(dakotaJsonFile) as f:
- jsonInputs = json.load(f)
+ with open(dakotaJsonFile) as f: # noqa: PTH123
+ jsonInputs = json.load(f) # noqa: N806
logFile.write(' ... Done')
# Read in the data of the objects within the json file
logFile.write('\n\tParsing the inputs read in from json file')
applications = jsonInputs['Applications']
- edpInputs = jsonInputs['EDP']
- uqInputs = jsonInputs['UQ']
- femInputs = jsonInputs['FEM']
- rvInputs = jsonInputs['randomVariables']
+ edpInputs = jsonInputs['EDP'] # noqa: N806
+ uqInputs = jsonInputs['UQ'] # noqa: N806
+ femInputs = jsonInputs['FEM'] # noqa: N806, F841
+ rvInputs = jsonInputs['randomVariables'] # noqa: N806
# localAppDirInputs = jsonInputs['localAppDir']
# pythonInputs = jsonInputs['python']
# remoteAppDirInputs = jsonInputs['remoteAppDir']
@@ -50,23 +50,23 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
# numCol = spreadsheet['numCol']
# numRow = spreadsheet['numRow']
# summary = uqResultsInputs['summary']
- workingDir = jsonInputs['workingDir']
+ workingDir = jsonInputs['workingDir'] # noqa: N806, F841
# Processing UQ inputs
logFile.write('\n\t\tProcessing UQ inputs')
- seedValue = uqInputs['seed']
- nSamples = uqInputs['numParticles']
+ seedValue = uqInputs['seed'] # noqa: N806
+ nSamples = uqInputs['numParticles'] # noqa: N806
# maxRunTime = uqInputs["maxRunTime"]
- if 'maxRunTime' in uqInputs.keys():
- maxRunTime = uqInputs['maxRunTime']
+ if 'maxRunTime' in uqInputs.keys(): # noqa: SIM118
+ maxRunTime = uqInputs['maxRunTime'] # noqa: N806
else:
- maxRunTime = float('inf')
- logLikelihoodFile = uqInputs['logLikelihoodFile']
- calDataFile = uqInputs['calDataFile']
+ maxRunTime = float('inf') # noqa: N806, F841
+ logLikelihoodFile = uqInputs['logLikelihoodFile'] # noqa: N806
+ calDataFile = uqInputs['calDataFile'] # noqa: N806
- parallelizeMCMC = True
+ parallelizeMCMC = True # noqa: N806
if 'parallelExecution' in uqInputs:
- parallelizeMCMC = uqInputs['parallelExecution']
+ parallelizeMCMC = uqInputs['parallelExecution'] # noqa: N806, F841
logFile.write('\n\t\t\tProcessing the log-likelihood script options')
# If log-likelihood script is provided, use that, otherwise, use default log-likelihood function
@@ -76,56 +76,56 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
logFile.write(
f"\n\t\t\t\tSearching for a user-defined log-likelihood script '{logLikelihoodFile}'"
)
- if os.path.exists(os.path.join(tmpSimCenterDir, logLikelihoodFile)):
+ if os.path.exists(os.path.join(tmpSimCenterDir, logLikelihoodFile)): # noqa: PTH110, PTH118
logFile.write(
f"\n\t\t\t\tFound log-likelihood file '{logLikelihoodFile}' in {tmpSimCenterDir}."
)
- logLikeModuleName = os.path.splitext(logLikelihoodFile)[0]
+ logLikeModuleName = os.path.splitext(logLikelihoodFile)[0] # noqa: PTH122, N806
try:
import_module(logLikeModuleName)
except:
logFile.write(
- f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(tmpSimCenterDir, logLikelihoodFile)}' cannot be imported."
+ f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(tmpSimCenterDir, logLikelihoodFile)}' cannot be imported." # noqa: PTH118
)
raise
else:
logFile.write(
f"\n\t\t\t\tERROR: The log-likelihood script '{logLikelihoodFile}' cannot be found in {tmpSimCenterDir}."
)
- raise FileNotFoundError(
- f"ERROR: The log-likelihood script '{logLikelihoodFile}' cannot be found in {tmpSimCenterDir}."
+ raise FileNotFoundError( # noqa: TRY003
+ f"ERROR: The log-likelihood script '{logLikelihoodFile}' cannot be found in {tmpSimCenterDir}." # noqa: EM102
)
else:
- defaultLogLikeFileName = 'defaultLogLikeScript.py'
- defaultLogLikeDirectoryPath = mainscriptDir
+ defaultLogLikeFileName = 'defaultLogLikeScript.py' # noqa: N806
+ defaultLogLikeDirectoryPath = mainscriptDir # noqa: N806
sys.path.append(defaultLogLikeDirectoryPath)
- logLikeModuleName = os.path.splitext(defaultLogLikeFileName)[0]
+ logLikeModuleName = os.path.splitext(defaultLogLikeFileName)[0] # noqa: PTH122, N806
logFile.write('\n\t\t\t\tLog-likelihood script not provided.')
logFile.write(
- f'\n\t\t\t\tUsing the default log-likelihood script: \n\t\t\t\t\t{os.path.join(defaultLogLikeDirectoryPath, defaultLogLikeFileName)}'
+ f'\n\t\t\t\tUsing the default log-likelihood script: \n\t\t\t\t\t{os.path.join(defaultLogLikeDirectoryPath, defaultLogLikeFileName)}' # noqa: PTH118
)
try:
import_module(logLikeModuleName)
except:
logFile.write(
- f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(tmpSimCenterDir, logLikelihoodFile)}' cannot be imported."
+ f"\n\t\t\t\tERROR: The log-likelihood script '{os.path.join(tmpSimCenterDir, logLikelihoodFile)}' cannot be imported." # noqa: PTH118
)
raise
- logLikeModule = import_module(logLikeModuleName)
+ logLikeModule = import_module(logLikeModuleName) # noqa: N806
# Processing EDP inputs
logFile.write('\n\n\t\tProcessing EDP inputs')
- edpNamesList = []
- edpLengthsList = []
+ edpNamesList = [] # noqa: N806
+ edpLengthsList = [] # noqa: N806
# Get list of EDPs and their lengths
for edp in edpInputs:
edpNamesList.append(edp['name'])
edpLengthsList.append(edp['length'])
logFile.write('\n\t\t\tThe EDPs defined are:')
- printString = '\n\t\t\t\t'
+ printString = '\n\t\t\t\t' # noqa: N806
for i in range(len(edpInputs)):
- printString += (
+ printString += ( # noqa: N806
f"Name: '{edpNamesList[i]}', Length: {edpLengthsList[i]}\n\t\t\t\t"
)
logFile.write(printString)
@@ -135,12 +135,12 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
logFile.write('\n\n\t\tProcessing application inputs')
# Processing number of models
# Check if this is a multi-model analysis
- runMultiModel = False
- modelsDict = {}
- modelIndicesList = []
- modelRVNamesList = []
+ runMultiModel = False # noqa: N806, F841
+ modelsDict = {} # noqa: N806
+ modelIndicesList = [] # noqa: N806
+ modelRVNamesList = [] # noqa: N806
applications = jsonInputs['Applications']
- for app, appInputs in applications.items():
+ for app, appInputs in applications.items(): # noqa: N806
logFile.write(f'\n\t\t\tApp: {app}')
if app.lower() != 'events':
appl = appInputs['Application'].lower()
@@ -151,11 +151,11 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
logFile.write(
f'\n\t\t\t\tFound a multimodel application - {app}: {appInputs["Application"]}'
)
- modelRVName = jsonInputs[app]['modelToRun'][3:]
- appModels = jsonInputs[app]['models']
- nM = len(appModels)
+ modelRVName = jsonInputs[app]['modelToRun'][3:] # noqa: N806
+ appModels = jsonInputs[app]['models'] # noqa: N806
+ nM = len(appModels) # noqa: N806
logFile.write(f'\n\t\t\t\t\tThere are {nM} {app} models')
- modelData = {}
+ modelData = {} # noqa: N806
modelData['nModels'] = nM
modelData['values'] = [i + 1 for i in range(nM)]
modelData['weights'] = [model['belief'] for model in appModels]
@@ -165,10 +165,10 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
modelRVNamesList.append(modelRVName)
else:
logFile.write('\n\t\t\t\tNot a multimodel application')
- nModels = 1
- for _, data in modelsDict.items():
- nModels = nModels * data['nModels']
- cartesianProductOfModelIndices = list(itertools.product(*modelIndicesList))
+ nModels = 1 # noqa: N806
+ for _, data in modelsDict.items(): # noqa: PERF102
+ nModels = nModels * data['nModels'] # noqa: N806
+ cartesianProductOfModelIndices = list(itertools.product(*modelIndicesList)) # noqa: N806
# logFile.write("\n\t\t\tNO LONGER Getting the number of models")
# inputFileList = []
# nModels = femInputs['numInputs']
@@ -180,12 +180,12 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
# else:
# inputFileList.append(femInputs['inputFile'])
# logFile.write('\n\t\t\t\tThe number of models is: {}'.format(nModels))
- writeFEMOutputs = True
+ writeFEMOutputs = True # noqa: N806
# Variables
- variablesList = []
+ variablesList = [] # noqa: N806
for _ in range(nModels):
- variablesList.append(
+ variablesList.append( # noqa: PERF401
{
'names': [],
'distributions': [],
@@ -205,13 +205,13 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
for i, rv in enumerate(rvInputs):
variablesList[ind]['names'].append(rv['name'])
variablesList[ind]['distributions'].append(rv['distribution'])
- paramString = ''
+ paramString = '' # noqa: N806
if rv['distribution'] == 'Uniform':
variablesList[ind]['Par1'].append(rv['lowerbound'])
variablesList[ind]['Par2'].append(rv['upperbound'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(
+ paramString = 'params: {}, {}'.format( # noqa: N806
rv['lowerbound'], rv['upperbound']
)
elif rv['distribution'] == 'Normal':
@@ -219,13 +219,13 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(rv['stdDev'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(rv['mean'], rv['stdDev'])
+ paramString = 'params: {}, {}'.format(rv['mean'], rv['stdDev']) # noqa: N806
elif rv['distribution'] == 'Half-Normal':
variablesList[ind]['Par1'].append(rv['Standard Deviation'])
variablesList[ind]['Par2'].append(rv['Upper Bound'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(
+ paramString = 'params: {}, {}'.format( # noqa: N806
rv['Standard Deviation'], rv['Upper Bound']
)
elif rv['distribution'] == 'Truncated-Normal':
@@ -233,7 +233,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(rv['Standard Deviation'])
variablesList[ind]['Par3'].append(rv['a'])
variablesList[ind]['Par4'].append(rv['b'])
- paramString = 'params: {}, {}, {}, {}'.format(
+ paramString = 'params: {}, {}, {}, {}'.format( # noqa: N806
rv['Mean'], rv['Standard Deviation'], rv['a'], rv['b']
)
elif rv['distribution'] == 'Beta':
@@ -241,7 +241,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(rv['betas'])
variablesList[ind]['Par3'].append(rv['lowerbound'])
variablesList[ind]['Par4'].append(rv['upperbound'])
- paramString = 'params: {}, {}, {}, {}'.format(
+ paramString = 'params: {}, {}, {}, {}'.format( # noqa: N806
rv['alphas'], rv['betas'], rv['lowerbound'], rv['upperbound']
)
elif rv['distribution'] == 'Lognormal':
@@ -257,13 +257,13 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(sigma)
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = f'params: {mu}, {sigma}'
+ paramString = f'params: {mu}, {sigma}' # noqa: N806
elif rv['distribution'] == 'Gumbel':
variablesList[ind]['Par1'].append(rv['alphaparam'])
variablesList[ind]['Par2'].append(rv['betaparam'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(
+ paramString = 'params: {}, {}'.format( # noqa: N806
rv['alphaparam'], rv['betaparam']
)
elif rv['distribution'] == 'Weibull':
@@ -271,7 +271,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(rv['scaleparam'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(
+ paramString = 'params: {}, {}'.format( # noqa: N806
rv['shapeparam'], rv['scaleparam']
)
elif rv['distribution'] == 'Exponential':
@@ -279,25 +279,25 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(None)
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}'.format(rv['lambda'])
+ paramString = 'params: {}'.format(rv['lambda']) # noqa: N806
elif rv['distribution'] == 'Gamma':
variablesList[ind]['Par1'].append(rv['k'])
variablesList[ind]['Par2'].append(rv['lambda'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}'.format(rv['k'], rv['lambda'])
+ paramString = 'params: {}, {}'.format(rv['k'], rv['lambda']) # noqa: N806
elif rv['distribution'] == 'Chisquare':
variablesList[ind]['Par1'].append(rv['k'])
variablesList[ind]['Par2'].append(None)
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}'.format(rv['k'])
+ paramString = 'params: {}'.format(rv['k']) # noqa: N806
elif rv['distribution'] == 'Truncated exponential':
variablesList[ind]['Par1'].append(rv['lambda'])
variablesList[ind]['Par2'].append(rv['a'])
variablesList[ind]['Par3'].append(rv['b'])
variablesList[ind]['Par4'].append(None)
- paramString = 'params: {}, {}, {}'.format(
+ paramString = 'params: {}, {}, {}'.format( # noqa: N806
rv['lambda'], rv['a'], rv['b']
)
elif rv['distribution'] == 'Discrete':
@@ -310,7 +310,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(None)
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = (
+ paramString = ( # noqa: N806
f'value: {cartesianProductOfModelIndices[ind][index]}'
)
except ValueError:
@@ -323,7 +323,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(rv['Weights'])
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = 'values: {}, weights: {}'.format(
+ paramString = 'values: {}, weights: {}'.format( # noqa: N806
rv['Values'], rv['Weights']
)
@@ -356,7 +356,7 @@ def parseDataFunction(dakotaJsonFile, logFile, tmpSimCenterDir, mainscriptDir):
variablesList[ind]['Par2'].append(b)
variablesList[ind]['Par3'].append(None)
variablesList[ind]['Par4'].append(None)
- paramString = f'params: {a}, {b}'
+ paramString = f'params: {a}, {b}' # noqa: N806
logFile.write(
'\n\t\t\t\t\t\t\tEDP number: {}, name: {}, dist: {}, {}'.format(
i, name, 'InvGamma', paramString
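The Lognormal branch of parseDataFunction appends a (mu, sigma) pair whose computation sits outside the hunks shown. The commented-out formulas in pdfs.LogNormDist, visible in the next file's diff, point at the standard moment conversion sketched below; treating it as the elided computation is an assumption.

```python
import numpy as np

def lognormal_params_from_moments(mean, std_dev):
    """(mu, sigma) of ln X from the mean and standard deviation of X."""
    sigma = np.sqrt(np.log(std_dev**2 / mean**2 + 1))
    mu = np.log(mean) - 0.5 * sigma**2
    return mu, sigma
```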
diff --git a/modules/performUQ/UCSD_UQ/pdfs.py b/modules/performUQ/UCSD_UQ/pdfs.py
index 2919ef705..689e9bc00 100644
--- a/modules/performUQ/UCSD_UQ/pdfs.py
+++ b/modules/performUQ/UCSD_UQ/pdfs.py
@@ -1,22 +1,22 @@
-"""@author: Mukesh, Maitreya, Conte, Aakash"""
+"""@author: Mukesh, Maitreya, Conte, Aakash""" # noqa: INP001, D400
import numpy as np
from scipy import stats
-class Dist:
+class Dist: # noqa: D101
def __init__(self, dist_name, params=None, moments=None, data=None):
self.dist_name = dist_name
self.params = params
self.moments = moments
self.data = data
if (params is None) and (moments is None) and (data is None):
- raise RuntimeError(
- 'Atleast one of parameters, moments, or data must be specified when creating a random variable'
+ raise RuntimeError( # noqa: TRY003
+ 'At least one of parameters, moments, or data must be specified when creating a random variable' # noqa: EM101
)
-class Uniform:
+class Uniform: # noqa: D101
# Method with in this uniform class
def __init__(
self,
@@ -28,11 +28,11 @@ def __init__(
self.upper = upper
# Method to generate random numbers
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return (self.upper - self.lower) * np.random.rand(N) + self.lower
# Method to compute log of the pdf at x
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
if (x - self.upper) * (x - self.lower) <= 0:
lp = np.log(1 / (self.upper - self.lower))
else:
@@ -40,14 +40,14 @@ def log_pdf_eval(self, x):
return lp
-class Halfnormal:
+class Halfnormal: # noqa: D101
def __init__(self, sig):
self.sig = sig
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.sig * np.abs(np.random.randn(N))
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
if x >= 0:
lp = (
-np.log(self.sig)
@@ -59,31 +59,31 @@ def log_pdf_eval(self, x):
return lp
-class Normal:
+class Normal: # noqa: D101
def __init__(self, mu, sig):
self.mu = mu
self.sig = sig
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.sig * np.random.randn(N) + self.mu
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
lp = (
-0.5 * np.log(2 * np.pi)
- np.log(self.sig)
- 0.5 * (((x - self.mu) / self.sig) ** 2)
)
- return lp
+ return lp # noqa: RET504
-class TrunNormal:
+class TrunNormal: # noqa: D101
def __init__(self, mu, sig, a, b):
self.mu = mu
self.sig = sig
self.a = a
self.b = b
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return stats.truncnorm(
(self.a - self.mu) / self.sig,
(self.b - self.mu) / self.sig,
@@ -91,28 +91,28 @@ def generate_rns(self, N):
scale=self.sig,
).rvs(N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
lp = stats.truncnorm(
(self.a - self.mu) / self.sig,
(self.b - self.mu) / self.sig,
loc=self.mu,
scale=self.sig,
).logpdf(x)
- return lp
+ return lp # noqa: RET504
-class mvNormal:
- def __init__(self, mu, E):
+class mvNormal: # noqa: D101
+ def __init__(self, mu, E): # noqa: N803
self.mu = mu
self.E = E
self.d = len(mu)
self.logdetE = np.log(np.linalg.det(self.E))
self.Einv = np.linalg.inv(E)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return np.random.multivariate_normal(self.mu, self.E, N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
xc = x - self.mu
return (
-(0.5 * self.d * np.log(2 * np.pi))
@@ -121,20 +121,20 @@ def log_pdf_eval(self, x):
)
-class InvGamma:
+class InvGamma: # noqa: D101
def __init__(self, a, b):
self.a = a
self.b = b
self.dist = stats.invgamma(self.a, scale=self.b)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class BetaDist:
+class BetaDist: # noqa: D101
def __init__(self, alpha, beta, lowerbound, upperbound):
self.alpha = alpha
self.beta = beta
@@ -144,14 +144,14 @@ def __init__(self, alpha, beta, lowerbound, upperbound):
self.alpha, self.beta, self.lowerbound, self.upperbound
)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class LogNormDist:
+class LogNormDist: # noqa: D101
def __init__(self, mu, sigma):
# self.sigma = np.sqrt(np.log(zeta**2/lamda**2 + 1))
# self.mu = np.log(lamda) - 1/2*self.sigma**2
@@ -160,53 +160,53 @@ def __init__(self, mu, sigma):
self.scale = np.exp(mu)
self.dist = stats.lognorm(s=self.s, loc=self.loc, scale=self.scale)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class GumbelDist:
+class GumbelDist: # noqa: D101
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
self.dist = stats.gumbel_r(loc=self.beta, scale=(1 / self.alpha))
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class WeibullDist:
+class WeibullDist: # noqa: D101
def __init__(self, shape, scale):
self.shape = shape
self.scale = scale
self.dist = stats.weibull_min(c=self.shape, scale=self.scale)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class ExponentialDist:
+class ExponentialDist: # noqa: D101
def __init__(self, lamda):
self.lamda = lamda
self.scale = 1 / self.lamda
self.dist = stats.expon(scale=self.scale)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class TruncatedExponentialDist:
+class TruncatedExponentialDist: # noqa: D101
def __init__(self, lamda, lower, upper):
self.lower = lower
self.upper = upper
@@ -216,14 +216,14 @@ def __init__(self, lamda, lower, upper):
self.b = (self.upper - self.lower) / self.scale
self.dist = stats.truncexpon(b=self.b, loc=self.loc, scale=self.scale)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class GammaDist:
+class GammaDist: # noqa: D101
def __init__(self, k, lamda):
self.k = k
self.lamda = lamda
@@ -232,26 +232,26 @@ def __init__(self, k, lamda):
self.scale = 1 / self.beta
self.dist = stats.gamma(a=self.alpha, scale=self.scale)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class ChiSquareDist:
+class ChiSquareDist: # noqa: D101
def __init__(self, k):
self.k = k
self.dist = stats.chi2(k=self.k)
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.dist.rvs(size=N)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: D102
return self.dist.logpdf(x)
-class DiscreteDist:
+class DiscreteDist: # noqa: D101
def __init__(self, values, weights):
self.values = values
self.weights = weights
@@ -259,10 +259,10 @@ def __init__(self, values, weights):
self.log_probabilities = np.log(self.weights) - np.log(np.sum(self.weights))
self.rng = np.random.default_rng()
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return self.rng.choice(self.values, N, p=self.probabilities)
- def U2X(self, u):
+ def U2X(self, u): # noqa: N802, D102
cumsum_prob = np.cumsum(self.probabilities)
cumsum_prob = np.insert(cumsum_prob, 0, 0)
cumsum_prob = cumsum_prob[:-1]
@@ -272,7 +272,7 @@ def U2X(self, u):
x[i] = self.values[np.where(cumsum_prob <= cdf_val)[0][-1]]
return x
- def log_pdf_eval(self, u):
+ def log_pdf_eval(self, u): # noqa: D102
x = self.U2X(u)
lp = np.zeros_like(x)
for i, x_comp in enumerate(x):
@@ -280,12 +280,12 @@ def log_pdf_eval(self, u):
return lp
-class ConstantInteger:
+class ConstantInteger: # noqa: D101
def __init__(self, value) -> None:
self.value = value
- def generate_rns(self, N):
+ def generate_rns(self, N): # noqa: N803, D102
return np.array([self.value for _ in range(N)], dtype=int)
- def log_pdf_eval(self, x):
+ def log_pdf_eval(self, x): # noqa: ARG002, D102
return 0.0
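DiscreteDist.U2X maps a standard-normal draw u onto the discrete support by an inverse-CDF lookup over the cumulative probabilities. The line defining cdf_val is elided in the hunk; the usage sketch below assumes it is the standard-normal CDF of u, which is consistent with the U-to-X naming.

```python
import numpy as np
from scipy import stats

values = np.array([10.0, 20.0, 30.0])
probabilities = np.array([0.2, 0.5, 0.3])

# Left edges of each value's CDF interval: [0.0, 0.2, 0.7]
cumsum_prob = np.insert(np.cumsum(probabilities), 0, 0)[:-1]

u = np.random.default_rng(0).standard_normal(5)
cdf_vals = stats.norm.cdf(u)  # assumed definition of the elided cdf_val
x = np.array([values[np.where(cumsum_prob <= c)[0][-1]] for c in cdf_vals])
print(x)  # each entry is one of 10.0, 20.0, 30.0
```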
diff --git a/modules/performUQ/UCSD_UQ/preprocess_hierarchical_bayesian.py b/modules/performUQ/UCSD_UQ/preprocess_hierarchical_bayesian.py
index 9650d6059..8b88e340a 100644
--- a/modules/performUQ/UCSD_UQ/preprocess_hierarchical_bayesian.py
+++ b/modules/performUQ/UCSD_UQ/preprocess_hierarchical_bayesian.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import shutil
import sys
@@ -9,7 +9,7 @@
path_to_common_uq = Path(__file__).parent.parent / 'common'
sys.path.append(str(path_to_common_uq))
-import uq_utilities
+import uq_utilities # noqa: E402
InputsType = tuple[
Path,
@@ -20,7 +20,7 @@
]
-class CommandLineArguments:
+class CommandLineArguments: # noqa: D101
working_directory_path: Path
template_directory_path: Path
run_type: Literal['runningLocal', 'runningRemote']
@@ -36,7 +36,7 @@ def _handle_arguments(
run_type = command_line_arguments.run_type
driver_file = command_line_arguments.driver_file
input_file = command_line_arguments.input_file
- with open(input_file) as f:
+ with open(input_file) as f: # noqa: PTH123
inputs = json.load(f)
return (
working_directory_path,
@@ -98,20 +98,20 @@ def _create_parser() -> argparse.ArgumentParser:
def _print_start_message(demarcation_string: str = '=', start_space: str = ''):
msg = f"'{Path(__file__).name}' started running"
- print()
- print(start_space + demarcation_string * len(msg))
- print(start_space + msg)
- print()
+ print() # noqa: T201
+ print(start_space + demarcation_string * len(msg)) # noqa: T201
+ print(start_space + msg) # noqa: T201
+ print() # noqa: T201
def _print_end_message(demarcation_string: str = '=', start_space: str = ''):
msg = f"'{Path(__file__).name}' finished running"
- print()
- print(start_space + msg)
- print(start_space + demarcation_string * len(msg))
+ print() # noqa: T201
+ print(start_space + msg) # noqa: T201
+ print(start_space + demarcation_string * len(msg)) # noqa: T201
-def main(arguments: InputsType):
+def main(arguments: InputsType): # noqa: D103
(
working_directory_path,
template_directory_path,
@@ -153,7 +153,7 @@ def main(arguments: InputsType):
list_of_dataset_lengths = []
for sample_number, dir_name_string in enumerate(list_of_dataset_subdirs):
- dir_name_string = list_of_dataset_subdirs[sample_number]
+ dir_name_string = list_of_dataset_subdirs[sample_number] # noqa: PLW2901
dir_name = Path(dir_name_string).stem
source_dir_name = template_directory_path / dir_name
destination_dir_name = working_directory_path / dir_name
@@ -210,10 +210,10 @@ def _parse_arguments(args) -> InputsType:
command_line_arguments = CommandLineArguments()
parser.parse_args(args=args, namespace=command_line_arguments)
arguments = _handle_arguments(command_line_arguments)
- return arguments
+ return arguments # noqa: RET504
-def preprocess_arguments(args):
+def preprocess_arguments(args): # noqa: D103
arguments = _parse_arguments(args)
return main(arguments=arguments)
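preprocess_hierarchical_bayesian parses directly into the CommandLineArguments class by passing it as the namespace= argument, which gives the parsed attributes the type annotations declared on the class. A minimal sketch of the pattern; the flag names here are hypothetical, since the body of _create_parser is elided:

```python
import argparse
from pathlib import Path

class Args:
    """Typed namespace in the style of CommandLineArguments."""
    working_directory_path: Path
    run_type: str

parser = argparse.ArgumentParser()
parser.add_argument('--working_directory_path', type=Path)  # hypothetical flags
parser.add_argument('--run_type', choices=['runningLocal', 'runningRemote'])

ns = Args()
parser.parse_args(
    ['--working_directory_path', '/tmp/work', '--run_type', 'runningLocal'],
    namespace=ns,
)
print(ns.working_directory_path, ns.run_type)
```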
diff --git a/modules/performUQ/UCSD_UQ/processInputs.py b/modules/performUQ/UCSD_UQ/processInputs.py
index c7299e2ff..97055a05b 100644
--- a/modules/performUQ/UCSD_UQ/processInputs.py
+++ b/modules/performUQ/UCSD_UQ/processInputs.py
@@ -1,4 +1,4 @@
-import argparse
+import argparse # noqa: INP001, D100
import json
import os
import platform
@@ -15,57 +15,57 @@
args, unknowns = parser.parse_known_args()
- inputFile = args.workflowInput
- runType = args.runType
- workflowDriver = args.driverFile
- outputFile = args.workflowOutput
+ inputFile = args.workflowInput # noqa: N816
+ runType = args.runType # noqa: N816
+ workflowDriver = args.driverFile # noqa: N816
+ outputFile = args.workflowOutput # noqa: N816
- cwd = os.getcwd()
- workdir_main = str(Path(cwd).parents[0])
+ cwd = os.getcwd() # noqa: PTH109
+ workdir_main = str(Path(cwd).parents[0]) # noqa: F821
# mainScriptPath = inputArgs[0]
# tmpSimCenterDir = inputArgs[1]
# templateDir = inputArgs[2]
# runType = inputArgs[3] # either "runningLocal" or "runningRemote"
- mainScriptPath = os.path.dirname(os.path.realpath(__file__))
- templateDir = cwd
- tmpSimCenterDir = str(Path(cwd).parents[0])
+ mainScriptPath = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N816
+ templateDir = cwd # noqa: N816
+ tmpSimCenterDir = str(Path(cwd).parents[0]) # noqa: N816, F821
# Change permission of workflow driver
if platform.system() != 'Windows':
- workflowDriverFile = os.path.join(templateDir, workflowDriver)
+ workflowDriverFile = os.path.join(templateDir, workflowDriver) # noqa: PTH118, N816
if runType == 'runningLocal':
- os.chmod(
+ os.chmod( # noqa: PTH101
workflowDriverFile,
stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH,
)
- st = os.stat(workflowDriverFile)
- os.chmod(workflowDriverFile, st.st_mode | stat.S_IEXEC)
- pythonCommand = 'python3'
+ st = os.stat(workflowDriverFile) # noqa: PTH116
+ os.chmod(workflowDriverFile, st.st_mode | stat.S_IEXEC) # noqa: PTH101
+ pythonCommand = 'python3' # noqa: N816
else:
- pythonCommand = 'python'
- workflowDriver = workflowDriver + '.bat'
+ pythonCommand = 'python' # noqa: N816
+ workflowDriver = workflowDriver + '.bat' # noqa: N816
if runType == 'runningLocal':
# Get path to python from dakota.json file
- dakotaJsonFile = os.path.join(os.path.abspath(templateDir), inputFile)
- with open(dakotaJsonFile) as f:
- jsonInputs = json.load(f)
+ dakotaJsonFile = os.path.join(os.path.abspath(templateDir), inputFile) # noqa: PTH100, PTH118, N816
+ with open(dakotaJsonFile) as f: # noqa: PTH123
+ jsonInputs = json.load(f) # noqa: N816
- if 'python' in jsonInputs.keys():
- pythonCommand = jsonInputs['python']
+ if 'python' in jsonInputs.keys(): # noqa: SIM118
+ pythonCommand = jsonInputs['python'] # noqa: N816
# Get the path to the mainscript.py of TMCMC
# mainScriptDir = os.path.split(mainScriptPath)[0]
- mainScript = os.path.join(mainScriptPath, 'mainscript.py')
+ mainScript = os.path.join(mainScriptPath, 'mainscript.py') # noqa: PTH118, N816
command = f'{pythonCommand} {mainScript} {tmpSimCenterDir} {templateDir} {runType} {workflowDriver} {inputFile}'
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
command, stderr=subprocess.STDOUT, shell=True
)
- returnCode = 0
+ returnCode = 0 # noqa: N816
except subprocess.CalledProcessError as e:
result = e.output
- returnCode = e.returncode
+ returnCode = e.returncode # noqa: N816
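# The S602 suppressions above acknowledge shell=True. A hedged sketch of the
# list-argument alternative (paths and arguments here are hypothetical),
# which avoids invoking a shell entirely:
import subprocess

cmd = ['python3', 'mainscript.py', 'tmpDir', 'templateDir',
       'runningLocal', 'driver', 'input.json']
try:
    result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    return_code = 0
except subprocess.CalledProcessError as e:
    result = e.output
    return_code = e.returncode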
diff --git a/modules/performUQ/UCSD_UQ/runFEM.py b/modules/performUQ/UCSD_UQ/runFEM.py
index 46b8a787c..3e7497448 100644
--- a/modules/performUQ/UCSD_UQ/runFEM.py
+++ b/modules/performUQ/UCSD_UQ/runFEM.py
@@ -2,7 +2,7 @@
and Prof. J.P. Conte
affiliation: SimCenter*; University of California, San Diego
-"""
+""" # noqa: INP001, D205, D400
import os
import shutil
@@ -11,66 +11,66 @@
import numpy as np
-def copytree(src, dst, symlinks=False, ignore=None):
- if not os.path.exists(dst):
- os.makedirs(dst)
+def copytree(src, dst, symlinks=False, ignore=None): # noqa: FBT002, D103
+ if not os.path.exists(dst): # noqa: PTH110
+ os.makedirs(dst) # noqa: PTH103
for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
+ s = os.path.join(src, item) # noqa: PTH118
+ d = os.path.join(dst, item) # noqa: PTH118
+ if os.path.isdir(s): # noqa: PTH112
copytree(s, d, symlinks, ignore)
else:
try:
if (
- not os.path.exists(d)
- or os.stat(s).st_mtime - os.stat(d).st_mtime > 1
+ not os.path.exists(d) # noqa: PTH110
+ or os.stat(s).st_mtime - os.stat(d).st_mtime > 1 # noqa: PTH116
):
shutil.copy2(s, d)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
msg = f'Could not copy {s}. The following error occurred: \n{ex}'
- return msg
+ return msg # noqa: RET504
return '0'
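# Hedged sketch of what the PTH1xx codes above point toward: the same
# mtime-guarded recursive copy written with pathlib (a behavior-equivalent
# illustration, not the project's implementation):
import shutil
from pathlib import Path


def copytree_pathlib(src, dst):
    dst = Path(dst)
    dst.mkdir(parents=True, exist_ok=True)  # replaces os.makedirs
    for s in Path(src).iterdir():
        d = dst / s.name
        if s.is_dir():
            copytree_pathlib(s, d)
        elif not d.exists() or s.stat().st_mtime - d.stat().st_mtime > 1:
            shutil.copy2(s, d)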
-def runFEM(
- particleNumber,
- parameterSampleValues,
+def runFEM( # noqa: N802
+ particleNumber, # noqa: N803
+ parameterSampleValues, # noqa: N803
variables,
- workdirMain,
+ workdirMain, # noqa: N803
log_likelihood_function,
- calibrationData,
- numExperiments,
- covarianceMatrixList,
- edpNamesList,
- edpLengthsList,
- scaleFactors,
- shiftFactors,
- workflowDriver,
+ calibrationData, # noqa: ARG001, N803
+ numExperiments, # noqa: ARG001, N803
+ covarianceMatrixList, # noqa: ARG001, N803
+ edpNamesList, # noqa: N803
+ edpLengthsList, # noqa: N803
+ scaleFactors, # noqa: ARG001, N803
+ shiftFactors, # noqa: ARG001, N803
+ workflowDriver, # noqa: N803
):
"""This function runs FE model (model.tcl) for each parameter value (par)
model.tcl should take parameter input
model.tcl should output 'output$PN.txt' -> column vector of size 'Ny'
- """
- workdirName = 'workdir.' + str(particleNumber + 1)
- analysisPath = os.path.join(workdirMain, workdirName)
+ """ # noqa: D205, D400, D401, D404
+ workdirName = 'workdir.' + str(particleNumber + 1) # noqa: N806
+ analysisPath = os.path.join(workdirMain, workdirName) # noqa: PTH118, N806
- if os.path.isdir(analysisPath):
- os.chmod(os.path.join(analysisPath, workflowDriver), 0o777)
+ if os.path.isdir(analysisPath): # noqa: PTH112
+ os.chmod(os.path.join(analysisPath, workflowDriver), 0o777) # noqa: S103, PTH101, PTH118
shutil.rmtree(analysisPath)
- os.mkdir(analysisPath)
+ os.mkdir(analysisPath) # noqa: PTH102
# copy templatefiles
- templateDir = os.path.join(workdirMain, 'templatedir')
+ templateDir = os.path.join(workdirMain, 'templatedir') # noqa: PTH118, N806
copytree(templateDir, analysisPath)
# change to analysis directory
os.chdir(analysisPath)
# write input file and covariance multiplier values list
- covarianceMultiplierList = []
- parameterNames = variables['names']
- with open('params.in', 'w') as f:
+ covarianceMultiplierList = [] # noqa: N806
+ parameterNames = variables['names'] # noqa: N806
+ with open('params.in', 'w') as f: # noqa: PTH123
f.write(f'{len(parameterSampleValues) - len(edpNamesList)}\n')
for i in range(len(parameterSampleValues)):
name = str(parameterNames[i])
@@ -82,16 +82,16 @@ def runFEM(
# subprocess.run(workflowDriver, stderr=subprocess.PIPE, shell=True)
- returnCode = subprocess.call(
- os.path.join(analysisPath, workflowDriver),
+ returnCode = subprocess.call( # noqa: S602, N806, F841
+ os.path.join(analysisPath, workflowDriver), # noqa: PTH118
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
) # subprocess.check_call(workflow_run_command, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
# Read in the model prediction
- if os.path.exists('results.out'):
- with open('results.out') as f:
+ if os.path.exists('results.out'): # noqa: PTH110
+ with open('results.out') as f: # noqa: PTH123
prediction = np.atleast_2d(np.genfromtxt(f)).reshape((1, -1))
preds = prediction.copy()
os.chdir('../')
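# Hedged sketch of the params.in layout written above (inferred from the
# visible write calls; names and values here are hypothetical): a count
# line, then one 'name value' pair per calibrated parameter. Note the real
# code subtracts the EDP entries from the count.
names = ['E', 'fy']
values = [29000.0, 50.0]
with open('params.in', 'w') as f:
    f.write(f'{len(names)}\n')
    for name, value in zip(names, values):
        f.write(f'{name} {value}\n')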
diff --git a/modules/performUQ/UCSD_UQ/runTMCMC.py b/modules/performUQ/UCSD_UQ/runTMCMC.py
index 9efce839c..41ab9b66a 100644
--- a/modules/performUQ/UCSD_UQ/runTMCMC.py
+++ b/modules/performUQ/UCSD_UQ/runTMCMC.py
@@ -1,7 +1,7 @@
"""authors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, and Prof. J.P. Conte
affiliation: University of California, San Diego
modified: Aakash Bangalore Satish, NHERI SimCenter, UC Berkeley
-"""
+""" # noqa: INP001, D205, D400
import csv
import multiprocessing as mp
@@ -14,7 +14,7 @@
from runFEM import runFEM
-def write_stage_start_info_to_logfile(
+def write_stage_start_info_to_logfile( # noqa: D103
logfile,
stage_number,
beta,
@@ -29,9 +29,9 @@ def write_stage_start_info_to_logfile(
logfile.write('\n\t\tSampling from prior')
logfile.write('\n\t\tbeta = 0')
else:
- logfile.write('\n\t\tbeta = %9.8g' % beta)
+ logfile.write('\n\t\tbeta = %9.8g' % beta) # noqa: UP031
logfile.write('\n\t\tESS = %d' % effective_sample_size)
- logfile.write('\n\t\tscalem = %.2g' % scale_factor_for_proposal_covariance)
+ logfile.write('\n\t\tscalem = %.2g' % scale_factor_for_proposal_covariance) # noqa: UP031
logfile.write(f'\n\t\tlog-evidence = {log_evidence:<9.8g}')
logfile.write(
f'\n\n\t\tNumber of model evaluations in this stage: {number_of_samples}'
@@ -40,12 +40,12 @@ def write_stage_start_info_to_logfile(
os.fsync(logfile.fileno())
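# The UP031 suppressions above mark printf-style formatting; for reference,
# the f-string equivalents produce identical output:
beta_val, scale_val = 0.125, 0.5  # example values
assert '\n\t\tbeta = %9.8g' % beta_val == f'\n\t\tbeta = {beta_val:9.8g}'
assert '\n\t\tscalem = %.2g' % scale_val == f'\n\t\tscalem = {scale_val:.2g}'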
-def write_eval_data_to_logfile(
+def write_eval_data_to_logfile( # noqa: D103
logfile,
- parallelize_MCMC,
+ parallelize_MCMC, # noqa: N803
run_type,
proc_count=1,
- MPI_size=1,
+ MPI_size=1, # noqa: N803
stage_num=0,
):
if stage_num == 0:
@@ -75,13 +75,13 @@ def write_eval_data_to_logfile(
logfile.write(f'\n\t\t\tNumber of processors being used: {1}')
-def create_headings(
+def create_headings( # noqa: D103
logfile,
model_number,
model_parameters,
edp_names_list,
edp_lengths_list,
- writeOutputs,
+ writeOutputs, # noqa: N803
):
# Create the headings, which will be the first line of the file
headings = 'eval_id\tinterface\t'
@@ -101,15 +101,15 @@ def create_headings(
return headings
-def get_prediction_from_workdirs(i, working_directory):
+def get_prediction_from_workdirs(i, working_directory): # noqa: D103
workdir_string = 'workdir.' + str(i + 1)
prediction = np.atleast_2d(
- np.genfromtxt(os.path.join(working_directory, workdir_string, 'results.out'))
+ np.genfromtxt(os.path.join(working_directory, workdir_string, 'results.out')) # noqa: PTH118
).reshape((1, -1))
- return prediction
+ return prediction # noqa: RET504
-def write_data_to_tab_files(
+def write_data_to_tab_files( # noqa: D103
logfile,
working_directory,
model_number,
@@ -117,11 +117,11 @@ def write_data_to_tab_files(
edp_names_list,
edp_lengths_list,
number_of_samples,
- dataToWrite,
+ dataToWrite, # noqa: N803
tab_file_name,
predictions,
):
- tab_file_full_path = os.path.join(working_directory, tab_file_name)
+ tab_file_full_path = os.path.join(working_directory, tab_file_name) # noqa: PTH118
write_outputs = True
headings = create_headings(
logfile,
@@ -133,7 +133,7 @@ def write_data_to_tab_files(
)
logfile.write(f'\n\t\t\tWriting to file {tab_file_full_path}')
- with open(tab_file_full_path, 'a+') as f:
+ with open(tab_file_full_path, 'a+') as f: # noqa: PTH123
if model_number == 0:
f.write(headings)
for i in range(number_of_samples):
@@ -154,7 +154,7 @@ def write_data_to_tab_files(
os.fsync(logfile.fileno())
-def write_data_to_csvfile(
+def write_data_to_csvfile( # noqa: D103
logfile,
total_number_of_models_in_ensemble,
stage_number,
@@ -171,24 +171,24 @@ def write_data_to_csvfile(
)
else:
string_to_append = f'resultsStage{stage_number - 1}.csv'
- resultsFilePath = os.path.join(
- os.path.abspath(working_directory),
+ resultsFilePath = os.path.join( # noqa: PTH118, N806
+ os.path.abspath(working_directory), # noqa: PTH100
string_to_append,
)
- with open(resultsFilePath, 'w', newline='') as csvfile:
- csvWriter = csv.writer(csvfile)
+ with open(resultsFilePath, 'w', newline='') as csvfile: # noqa: PTH123
+ csvWriter = csv.writer(csvfile) # noqa: N806
csvWriter.writerows(data_to_write)
logfile.write(f'\n\t\t\tWrote to file {resultsFilePath}')
# Finished writing data
-def run_TMCMC(
+def run_TMCMC( # noqa: N802, PLR0913
number_of_samples,
number_of_chains,
all_distributions_list,
- number_of_MCMC_steps,
- max_number_of_MCMC_steps,
+ number_of_MCMC_steps, # noqa: N803
+ max_number_of_MCMC_steps, # noqa: N803
log_likelihood_function,
model_parameters,
working_directory,
@@ -202,21 +202,21 @@ def run_TMCMC(
shift_factors,
run_type,
logfile,
- MPI_size,
+ MPI_size, # noqa: N803
driver_file,
- parallelize_MCMC=True,
+ parallelize_MCMC=True, # noqa: FBT002, N803
model_number=0,
total_number_of_models_in_ensemble=1,
):
- """Runs TMCMC Algorithm"""
+ """Runs TMCMC Algorithm""" # noqa: D400, D401
# Initialize (beta, effective sample size)
beta = 0
effective_sample_size = number_of_samples
mytrace = []
# Initialize other TMCMC variables
- number_of_MCMC_steps = number_of_MCMC_steps
- adaptively_calculate_num_MCMC_steps = True
+ number_of_MCMC_steps = number_of_MCMC_steps # noqa: N806, PLW0127
+ adaptively_calculate_num_MCMC_steps = True # noqa: N806
adaptively_scale_proposal_covariance = True
scale_factor_for_proposal_covariance = 1 # cov scale factor
# model_evidence = 1 # model evidence
@@ -465,7 +465,7 @@ def run_TMCMC(
posterior_pdf_vals_list,
num_accepts,
all_proposals,
- all_PLP,
+ all_PLP, # noqa: N806
preds_list,
) = zip(*results)
# for next beta
@@ -477,7 +477,7 @@ def run_TMCMC(
num_accepts = np.asarray(num_accepts)
number_of_accepted_states_in_this_stage = sum(num_accepts)
all_proposals = np.asarray(all_proposals)
- all_PLP = np.asarray(all_PLP)
+ all_PLP = np.asarray(all_PLP) # noqa: N806
total_number_of_model_evaluations += (
number_of_model_evaluations_in_this_stage
@@ -487,7 +487,7 @@ def run_TMCMC(
)
# total observed acceptance rate
- R = (
+ R = ( # noqa: N806
number_of_accepted_states_in_this_stage
/ number_of_model_evaluations_in_this_stage
)
@@ -501,13 +501,13 @@ def run_TMCMC(
adaptively_calculate_num_MCMC_steps
): # Calculate Nm_steps based on observed acceptance rate
# increase max Nmcmc with stage number
- number_of_MCMC_steps = min(
+ number_of_MCMC_steps = min( # noqa: N806
number_of_MCMC_steps + 1, max_number_of_MCMC_steps
)
logfile.write('\n\t\tadapted max MCMC steps = %d' % number_of_MCMC_steps)
acc_rate = max(1.0 / number_of_model_evaluations_in_this_stage, R)
- number_of_MCMC_steps = min(
+ number_of_MCMC_steps = min( # noqa: N806
number_of_MCMC_steps,
1 + int(np.log(1 - 0.99) / np.log(1 - acc_rate)),
)
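# The step-count formula above appears to size the chain so that, at
# per-step acceptance rate acc_rate, at least one move is accepted with
# 99% probability: n >= log(1 - 0.99) / log(1 - acc_rate). Worked example:
import numpy as np

acc_rate = 0.2
n_steps = 1 + int(np.log(1 - 0.99) / np.log(1 - acc_rate))
assert n_steps == 21  # log(0.01)/log(0.8) ~= 20.64 -> 20, plus one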
diff --git a/modules/performUQ/UCSD_UQ/tmcmcFunctions.py b/modules/performUQ/UCSD_UQ/tmcmcFunctions.py
index e0597ff05..8f331e0c6 100644
--- a/modules/performUQ/UCSD_UQ/tmcmcFunctions.py
+++ b/modules/performUQ/UCSD_UQ/tmcmcFunctions.py
@@ -1,98 +1,98 @@
"""authors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, and Prof. J.P. Conte
affiliation: University of California, San Diego
-"""
+""" # noqa: INP001, D205, D400
import numpy as np
from runFEM import runFEM
from scipy.special import logsumexp
-def initial_population(N, p):
- IniPop = np.zeros((N, len(p)))
+def initial_population(N, p): # noqa: N803, D103
+ IniPop = np.zeros((N, len(p))) # noqa: N806
for i in range(len(p)):
IniPop[:, i] = p[i].generate_rns(N)
return IniPop
-def log_prior(s, p):
- logP = 0
+def log_prior(s, p): # noqa: D103
+ logP = 0 # noqa: N806
for i in range(len(s)):
- logP = logP + p[i].log_pdf_eval(s[i])
+ logP = logP + p[i].log_pdf_eval(s[i]) # noqa: N806
return logP
-def propose(current, covariance, n):
+def propose(current, covariance, n): # noqa: D103
return np.random.multivariate_normal(current, covariance, n)
-def compute_beta(beta, likelihoods, prev_ESS, threshold):
+def compute_beta(beta, likelihoods, prev_ESS, threshold): # noqa: N803, D103
old_beta = beta
min_beta = beta
max_beta = 2.0
# rN = int(len(likelihoods) * 0.95) #pymc3 uses 0.5
- rN = threshold * prev_ESS # purdue prof uses 0.95
+ rN = threshold * prev_ESS # purdue prof uses 0.95 # noqa: N806
new_beta = beta
- while max_beta - min_beta > 1e-3:
+ while max_beta - min_beta > 1e-3: # noqa: PLR2004
new_beta = 0.5 * (max_beta + min_beta)
# plausible weights of Sm corresponding to new beta
inc_beta = new_beta - old_beta
- Wm = np.exp(inc_beta * (likelihoods - likelihoods.max()))
- ESS = int(1 / np.sum((Wm / sum(Wm)) ** 2))
+ Wm = np.exp(inc_beta * (likelihoods - likelihoods.max())) # noqa: N806
+ ESS = int(1 / np.sum((Wm / sum(Wm)) ** 2)) # noqa: N806
if rN == ESS:
break
- elif rN > ESS:
+ elif rN > ESS: # noqa: RET508
max_beta = new_beta
else:
min_beta = new_beta
- if new_beta < 1e-3:
+ if new_beta < 1e-3: # noqa: PLR2004
new_beta = 1e-3
inc_beta = new_beta - old_beta
- Wm = np.exp(inc_beta * (likelihoods - likelihoods.max()))
+ Wm = np.exp(inc_beta * (likelihoods - likelihoods.max())) # noqa: N806
- if new_beta >= 0.95:
+ if new_beta >= 0.95: # noqa: PLR2004
new_beta = 1
# plausible weights of Sm corresponding to new beta
inc_beta = new_beta - old_beta
- Wm = np.exp(inc_beta * (likelihoods - likelihoods.max()))
+ Wm = np.exp(inc_beta * (likelihoods - likelihoods.max())) # noqa: N806
return new_beta, Wm, ESS
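# Hedged demo of the quantities compute_beta bisects on: the plausibility
# weights for a candidate beta increment and the resulting effective sample
# size (ESS falls as inc_beta grows, concentrating weight on the
# highest-likelihood samples):
import numpy as np

log_likelihoods = np.array([-1.0, -2.0, -3.0, -4.0])
inc_beta = 0.5
Wm = np.exp(inc_beta * (log_likelihoods - log_likelihoods.max()))
ESS = int(1 / np.sum((Wm / Wm.sum()) ** 2))  # ~3 of 4 effective samples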
-def compute_beta_evidence_old(
+def compute_beta_evidence_old( # noqa: D103
beta,
log_likelihoods,
log_evidence,
- prev_ESS,
+ prev_ESS, # noqa: N803
threshold,
):
old_beta = beta
min_beta = beta
max_beta = 2.0
- N = len(log_likelihoods)
- min_ESS = np.ceil(0.1 * N)
- rN = max(threshold * prev_ESS, min_ESS)
+ N = len(log_likelihoods) # noqa: N806
+ min_ESS = np.ceil(0.1 * N) # noqa: N806
+ rN = max(threshold * prev_ESS, min_ESS) # noqa: N806
new_beta = 0.5 * (max_beta + min_beta)
inc_beta = new_beta - old_beta
- log_Wm = inc_beta * log_likelihoods
- log_Wm_n = log_Wm - logsumexp(log_Wm)
- ESS = int(np.exp(-logsumexp(log_Wm_n * 2)))
+ log_Wm = inc_beta * log_likelihoods # noqa: N806
+ log_Wm_n = log_Wm - logsumexp(log_Wm) # noqa: N806
+ ESS = int(np.exp(-logsumexp(log_Wm_n * 2))) # noqa: N806
- while max_beta - min_beta > 1e-6: # min step size
+ while max_beta - min_beta > 1e-6: # min step size # noqa: PLR2004
new_beta = 0.5 * (max_beta + min_beta)
# plausible weights of Sm corresponding to new beta
inc_beta = new_beta - old_beta
- log_Wm = inc_beta * log_likelihoods
- log_Wm_n = log_Wm - logsumexp(log_Wm)
- ESS = int(np.exp(-logsumexp(log_Wm_n * 2)))
+ log_Wm = inc_beta * log_likelihoods # noqa: N806
+ log_Wm_n = log_Wm - logsumexp(log_Wm) # noqa: N806
+ ESS = int(np.exp(-logsumexp(log_Wm_n * 2))) # noqa: N806
if rN == ESS:
break
- elif rN > ESS:
+ elif rN > ESS: # noqa: RET508
max_beta = new_beta
else:
min_beta = new_beta
@@ -102,11 +102,11 @@ def compute_beta_evidence_old(
# plausible weights of Sm corresponding to new beta
inc_beta = new_beta - old_beta
- log_Wm = inc_beta * log_likelihoods
- log_Wm_n = log_Wm - logsumexp(log_Wm)
+ log_Wm = inc_beta * log_likelihoods # noqa: N806
+ log_Wm_n = log_Wm - logsumexp(log_Wm) # noqa: N806
- Wm = np.exp(log_Wm)
- Wm_n = np.exp(log_Wm_n)
+ Wm = np.exp(log_Wm) # noqa: N806, F841
+ Wm_n = np.exp(log_Wm_n) # noqa: N806
# update model evidence
# evidence = evidence * (sum(Wm)/N)
@@ -117,32 +117,32 @@ def compute_beta_evidence_old(
# MCMC
-def MCMC_MH_old(
- ParticleNum,
- Em,
- Nm_steps,
+def MCMC_MH_old( # noqa: N802, D103, PLR0913
+ ParticleNum, # noqa: N803
+ Em, # noqa: N803
+ Nm_steps, # noqa: N803
current,
likelihood_current,
posterior_current,
beta,
- numAccepts,
- AllPars,
+ numAccepts, # noqa: N803
+ AllPars, # noqa: N803
log_likelihood,
variables,
- resultsLocation,
+ resultsLocation, # noqa: N803
rng,
- calibrationData,
- numExperiments,
- covarianceMatrixList,
- edpNamesList,
- edpLengthsList,
- normalizingFactors,
- locShiftList,
- workflowDriver,
+ calibrationData, # noqa: N803
+ numExperiments, # noqa: N803
+ covarianceMatrixList, # noqa: N803
+ edpNamesList, # noqa: N803
+ edpLengthsList, # noqa: N803
+ normalizingFactors, # noqa: N803
+ locShiftList, # noqa: N803
+ workflowDriver, # noqa: N803
prediction_current,
):
all_proposals = []
- all_PLP = []
+ all_PLP = [] # noqa: N806
# deltas = propose(np.zeros(len(current)), Em, Nm_steps)
deltas = rng.multivariate_normal(np.zeros(len(current)), Em, Nm_steps)
@@ -188,7 +188,7 @@ def MCMC_MH_old(
current = proposal
posterior_current = posterior_proposal
likelihood_current = likelihood_proposal
- numAccepts += 1
+ numAccepts += 1 # noqa: N806
prediction_current = prediction_proposal
# gather all last samples
@@ -204,32 +204,32 @@ def MCMC_MH_old(
# MCMC
-def MCMC_MH(
- ParticleNum,
- Em,
- Nm_steps,
+def MCMC_MH( # noqa: N802, D103, PLR0913
+ ParticleNum, # noqa: N803
+ Em, # noqa: N803
+ Nm_steps, # noqa: N803
current,
likelihood_current,
posterior_current,
beta,
- numAccepts,
- AllPars,
+ numAccepts, # noqa: N803
+ AllPars, # noqa: N803
log_likelihood,
variables,
- resultsLocation,
+ resultsLocation, # noqa: N803
rng,
- calibrationData,
- numExperiments,
- covarianceMatrixList,
- edpNamesList,
- edpLengthsList,
- normalizingFactors,
- locShiftList,
- workflowDriver,
+ calibrationData, # noqa: N803
+ numExperiments, # noqa: N803
+ covarianceMatrixList, # noqa: N803
+ edpNamesList, # noqa: N803
+ edpLengthsList, # noqa: N803
+ normalizingFactors, # noqa: N803
+ locShiftList, # noqa: N803
+ workflowDriver, # noqa: N803
prediction_current,
):
all_proposals = []
- all_PLP = []
+ all_PLP = [] # noqa: N806
# deltas = propose(np.zeros(len(current)), Em, Nm_steps)
deltas = rng.multivariate_normal(np.zeros(len(current)), Em, Nm_steps)
@@ -275,7 +275,7 @@ def MCMC_MH(
current = proposal
posterior_current = posterior_proposal
likelihood_current = likelihood_proposal
- numAccepts += 1
+ numAccepts += 1 # noqa: N806
prediction_current = prediction_proposal
# gather all last samples
@@ -331,7 +331,7 @@ def MCMC_MH(
# return new_beta, log_evidence, Wm_n, ESS
-def get_weights(dBeta, log_likelihoods):
+def get_weights(dBeta, log_likelihoods): # noqa: N803, D103
log_weights = dBeta * log_likelihoods
log_sum_weights = logsumexp(log_weights)
log_weights_normalized = log_weights - log_sum_weights
@@ -342,14 +342,14 @@ def get_weights(dBeta, log_likelihoods):
return weights_normalized, cov_weights, std_weights_normalized
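# Hedged demo of the logsumexp normalization above; the weight COV (assumed
# std/mean, the usual TMCMC target statistic) drives the dBeta search in
# compute_beta_evidence below.
import numpy as np
from scipy.special import logsumexp

log_likelihoods = np.array([-1.0, -2.0, -3.0])
dBeta = 0.5
log_w = dBeta * log_likelihoods
w = np.exp(log_w - logsumexp(log_w))
assert np.isclose(w.sum(), 1.0)
cov_w = np.std(w) / np.mean(w)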
-def compute_beta_evidence(beta, log_likelihoods, logFile, threshold=1.0):
+def compute_beta_evidence(beta, log_likelihoods, logFile, threshold=1.0): # noqa: N803, D103
max_beta = 1.0
- dBeta = min(max_beta, 1.0 - beta)
+ dBeta = min(max_beta, 1.0 - beta) # noqa: N806
weights, cov_weights, std_weights = get_weights(dBeta, log_likelihoods)
while cov_weights > (threshold) or (std_weights == 0):
- dBeta = dBeta * 0.99
+ dBeta = dBeta * 0.99 # noqa: N806
# while (cov_weights > (threshold+0.00000005) or (std_weights == 0)):
# if ((cov_weights > (threshold+1.0)) or (std_weights == 0)):
@@ -371,21 +371,21 @@ def compute_beta_evidence(beta, log_likelihoods, logFile, threshold=1.0):
# if ((cov_weights > (threshold+0.00000005)) or (std_weights == 0)):
# dBeta = dBeta*0.99999999
- if dBeta < 1e-3:
- dBeta = 1e-3
+ if dBeta < 1e-3: # noqa: PLR2004
+ dBeta = 1e-3 # noqa: N806
weights, cov_weights, std_weights = get_weights(dBeta, log_likelihoods)
break
weights, cov_weights, std_weights = get_weights(dBeta, log_likelihoods)
beta = beta + dBeta
- if beta > 0.95:
+ if beta > 0.95: # noqa: PLR2004
beta = 1
log_evidence = logsumexp(dBeta * log_likelihoods) - np.log(len(log_likelihoods))
try:
- ESS = int(1 / np.sum((weights / np.sum(weights)) ** 2))
+ ESS = int(1 / np.sum((weights / np.sum(weights)) ** 2)) # noqa: N806
except OverflowError as err:
- ESS = 0
+ ESS = 0 # noqa: N806
logFile.write(str(err))
return beta, log_evidence, weights, ESS
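# A simplified sketch of the back-off loop above, assuming the COV is
# std/mean of the normalized weights: shrink dBeta by 1% until the COV
# meets the threshold, then advance beta (the real code also snaps
# beta > 0.95 to 1 and floors dBeta at 1e-3).
import numpy as np
from scipy.special import logsumexp


def _weight_cov(d_beta, log_l):
    log_w = d_beta * log_l
    w = np.exp(log_w - logsumexp(log_w))
    return np.std(w) / np.mean(w)


log_l = np.random.default_rng(0).normal(size=100)
beta, threshold = 0.0, 1.0
d_beta = min(1.0, 1.0 - beta)
while _weight_cov(d_beta, log_l) > threshold:
    d_beta *= 0.99
beta = min(beta + d_beta, 1.0)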
diff --git a/modules/performUQ/UQpy/UQpyEngine.py b/modules/performUQ/UQpy/UQpyEngine.py
index 4c503b6f5..692df6112 100644
--- a/modules/performUQ/UQpy/UQpyEngine.py
+++ b/modules/performUQ/UQpy/UQpyEngine.py
@@ -1,11 +1,11 @@
-# written: UQ team @ SimCenter
+# written: UQ team @ SimCenter # noqa: INP001, D100
# import functions for Python 2.X support
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
@@ -37,51 +37,51 @@
@click.option(
'--runType', required=True, type=click.Choice(['runningLocal', 'runningRemote'])
)
-def main(workflowinput, workflowoutput, driverfile, runtype):
+def main(workflowinput, workflowoutput, driverfile, runtype): # noqa: ARG001, D103
python = sys.executable
# get os type
- osType = platform.system()
+ osType = platform.system() # noqa: N806
if runtype == 'runningLocal':
if (
sys.platform == 'darwin'
or sys.platform == 'linux'
or sys.platform == 'linux2'
):
- osType = 'Linux'
+ osType = 'Linux' # noqa: N806
else:
driverfile = driverfile + '.bat'
- osType = 'Windows'
+ osType = 'Windows' # noqa: N806
elif runtype == 'runningRemote':
- osType = 'Linux'
+ osType = 'Linux' # noqa: N806
- thisScriptDir = os.path.dirname(os.path.realpath(__file__))
+ thisScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
- os.chmod(
+ os.chmod( # noqa: PTH101
f'{thisScriptDir}/preprocessUQpy.py',
stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH,
)
# 1. Create the UQy analysis python script
- preprocessorCommand = f"'{python}' '{thisScriptDir}/preprocessUQpy.py' --workflowInput {workflowinput} --driverFile {driverfile} --runType {runtype} --osType {osType}"
+ preprocessorCommand = f"'{python}' '{thisScriptDir}/preprocessUQpy.py' --workflowInput {workflowinput} --driverFile {driverfile} --runType {runtype} --osType {osType}" # noqa: N806
- subprocess.run(preprocessorCommand, shell=True, check=False)
+ subprocess.run(preprocessorCommand, shell=True, check=False) # noqa: S602
if runtype == 'runningLocal':
- os.chmod(
+ os.chmod( # noqa: PTH101
driverfile, stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH
)
# 2. Run the python script
- UQpycommand = python + ' UQpyAnalysis.py' + ' 1> uqpy.log 2>&1 '
+ UQpycommand = python + ' UQpyAnalysis.py' + ' 1> uqpy.log 2>&1 ' # noqa: N806
# Change permission of workflow driver
- st = os.stat(driverfile)
- os.chmod(driverfile, st.st_mode | stat.S_IEXEC)
+ st = os.stat(driverfile) # noqa: PTH116
+ os.chmod(driverfile, st.st_mode | stat.S_IEXEC) # noqa: PTH101
if runtype == 'runningLocal':
- print('running UQpy: ', UQpycommand)
- subprocess.run(
+ print('running UQpy: ', UQpycommand) # noqa: T201
+ subprocess.run( # noqa: S602
UQpycommand, stderr=subprocess.STDOUT, shell=True, check=False
)
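# The PTH101/PTH116 suppressions above mark os.chmod/os.stat; a hedged
# pathlib equivalent for making a driver file executable:
import stat
from pathlib import Path

driver = Path('driver')  # hypothetical file
driver.touch()
driver.chmod(driver.stat().st_mode | stat.S_IEXEC)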
diff --git a/modules/performUQ/UQpy/preprocessUQpy.py b/modules/performUQ/UQpy/preprocessUQpy.py
index 605c65aae..5d7845585 100755
--- a/modules/performUQ/UQpy/preprocessUQpy.py
+++ b/modules/performUQ/UQpy/preprocessUQpy.py
@@ -1,4 +1,4 @@
-import click
+import click # noqa: EXE002, INP001, D100
from src.quofemDTOs import Model
from src.runmodel.RunModelDTOs import RunModelDTO
@@ -26,7 +26,7 @@
type=click.Choice(['Linux', 'Windows']),
help='Type of operating system the workflow will run on.',
)
-def preprocess(workflowinput, driverfile, runtype, ostype):
+def preprocess(workflowinput, driverfile, runtype, ostype): # noqa: ARG001, D103
# 1. Parse the input JSON file
model = Model.parse_file(workflowinput)
@@ -39,7 +39,7 @@ def preprocess(workflowinput, driverfile, runtype, ostype):
code.append('#\n# Creating the random variable distributions\n#')
marginals_code = 'marginals = JointIndependent(['
for distribution in model.randomVariables:
- (distribution_code, input) = distribution.init_to_text()
+ (distribution_code, input) = distribution.init_to_text() # noqa: A001
code.append(distribution_code)
marginals_code += input + ', '
marginals_code += '])'
@@ -59,7 +59,7 @@ def preprocess(workflowinput, driverfile, runtype, ostype):
code.append(uqmethod_code)
# 3. Write code to analysis script
- with open('UQpyAnalysis.py', 'w') as outfile:
+ with open('UQpyAnalysis.py', 'w') as outfile: # noqa: PTH123
outfile.write('\n'.join(code))
diff --git a/modules/performUQ/UQpy/src/UQpyDTO.py b/modules/performUQ/UQpy/src/UQpyDTO.py
index fa240d593..5f19a57a8 100644
--- a/modules/performUQ/UQpy/src/UQpyDTO.py
+++ b/modules/performUQ/UQpy/src/UQpyDTO.py
@@ -1,16 +1,16 @@
-from py_linq import Enumerable
+from py_linq import Enumerable # noqa: INP001, D100
from pydantic import BaseModel
-class UQpyDTO(BaseModel):
+class UQpyDTO(BaseModel): # noqa: D101
@staticmethod
- def is_primitive(obj):
+ def is_primitive(obj): # noqa: D102
return not hasattr(obj, '__dict__')
# def init_to_text(self) -> (str, str):
# pass
- def generate_code(self):
+ def generate_code(self): # noqa: D102
prerequisite_list = ''
fields = Enumerable(self.__dict__.items())
objects = fields.where(lambda x: not UQpyDTO.is_primitive(x[1]))
diff --git a/modules/performUQ/UQpy/src/distributions/UniformDTOs.py b/modules/performUQ/UQpy/src/distributions/UniformDTOs.py
index 332906de1..9b2c1b5e9 100644
--- a/modules/performUQ/UQpy/src/distributions/UniformDTOs.py
+++ b/modules/performUQ/UQpy/src/distributions/UniformDTOs.py
@@ -1,32 +1,32 @@
-from typing import Literal, Union
+from typing import Literal, Union # noqa: INP001, D100
import numpy as np
from pydantic import BaseModel, Field, PositiveFloat, validator
from typing_extensions import Annotated
-class RVCommonData(BaseModel):
+class RVCommonData(BaseModel): # noqa: D101
name: str
value: str
- refCount: int
+ refCount: int # noqa: N815
-class UniformParameters(RVCommonData):
- variableClass: Literal['Uncertain']
+class UniformParameters(RVCommonData): # noqa: D101
+ variableClass: Literal['Uncertain'] # noqa: N815
distribution: Literal['Uniform']
- inputType: Literal['Parameters']
+ inputType: Literal['Parameters'] # noqa: N815
lowerbound: float = 0.0
upperbound: float = 1.0
@validator('upperbound')
- def upper_bound_not_bigger_than_lower_bound(v, values):
+ def upper_bound_not_bigger_than_lower_bound(v, values): # noqa: N805, D102
if 'lowerbound' in values and v <= values['lowerbound']:
- raise ValueError(
- f"The upper bound must be bigger than the lower bound {values['lowerbound']}. Got a value of {v}."
+ raise ValueError( # noqa: TRY003
+ f"The upper bound must be bigger than the lower bound {values['lowerbound']}. Got a value of {v}." # noqa: EM102
)
return v
- def init_to_text(self):
+ def init_to_text(self): # noqa: D102
from UQpy.distributions.collection.Uniform import Uniform
c = Uniform
@@ -46,12 +46,12 @@ def _to_scipy(self):
return {'loc': loc, 'scale': scale}
-class UniformMoments(RVCommonData):
- variableClass: Literal['Uncertain']
+class UniformMoments(RVCommonData): # noqa: D101
+ variableClass: Literal['Uncertain'] # noqa: N815
distribution: Literal['Uniform']
- inputType: Literal['Moments']
+ inputType: Literal['Moments'] # noqa: N815
mean: float
- standardDev: PositiveFloat
+ standardDev: PositiveFloat # noqa: N815
def _to_scipy(self):
loc = self.mean - np.sqrt(12) * self.standardDev / 2
@@ -59,11 +59,11 @@ def _to_scipy(self):
return {'loc': loc, 'scale': scale}
-class UniformDataset(RVCommonData):
- variableClass: Literal['Uncertain']
+class UniformDataset(RVCommonData): # noqa: D101
+ variableClass: Literal['Uncertain'] # noqa: N815
distribution: Literal['Uniform']
- inputType: Literal['Dataset']
- dataDir: str
+ inputType: Literal['Dataset'] # noqa: N815
+ dataDir: str # noqa: N815
def _to_scipy(self):
data = readFile(self.dataDir)
@@ -72,8 +72,8 @@ def _to_scipy(self):
return {'loc': low, 'scale': high - low}
-def readFile(path):
- with open(path) as f:
+def readFile(path): # noqa: N802, D103
+ with open(path) as f: # noqa: PTH123
return np.genfromtxt(f)
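# Hedged usage sketch of the UniformParameters validator above (pydantic v1
# style, matching the @validator import; assumes the class is importable
# from this module): an upper bound at or below the lower bound raises a
# ValidationError.
from pydantic import ValidationError

try:
    UniformParameters(
        name='x', value='RV.x', refCount=1,
        variableClass='Uncertain', distribution='Uniform',
        inputType='Parameters', lowerbound=2.0, upperbound=1.0,
    )
except ValidationError as err:
    print(err)  # "The upper bound must be bigger than the lower bound ..."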
diff --git a/modules/performUQ/UQpy/src/modules/ModuleDTOs.py b/modules/performUQ/UQpy/src/modules/ModuleDTOs.py
index d9e487f6f..0a52fdcb4 100644
--- a/modules/performUQ/UQpy/src/modules/ModuleDTOs.py
+++ b/modules/performUQ/UQpy/src/modules/ModuleDTOs.py
@@ -1,28 +1,28 @@
-from typing import Literal, Union
+from typing import Literal, Union # noqa: INP001, D100
from pydantic import BaseModel, Field
from src.reliability.ReliabilityMethodsDTOs import ReliabilityMethod
from typing_extensions import Annotated
-class ModuleBaseDTO(BaseModel):
+class ModuleBaseDTO(BaseModel): # noqa: D101
pass
-class SamplingDTO(ModuleBaseDTO):
- uqType: Literal['Sampling'] = 'Sampling'
+class SamplingDTO(ModuleBaseDTO): # noqa: D101
+ uqType: Literal['Sampling'] = 'Sampling' # noqa: N815
- def generate_code(self):
+ def generate_code(self): # noqa: D102
pass
-class SurrogatesDTO(ModuleBaseDTO):
- uqType: Literal['Surrogates'] = 'Surrogates'
+class SurrogatesDTO(ModuleBaseDTO): # noqa: D101
+ uqType: Literal['Surrogates'] = 'Surrogates' # noqa: N815
-class ReliabilityDTO(ModuleBaseDTO):
- uqType: Literal['Reliability Analysis'] = 'Reliability Analysis'
- methodData: ReliabilityMethod
+class ReliabilityDTO(ModuleBaseDTO): # noqa: D101
+ uqType: Literal['Reliability Analysis'] = 'Reliability Analysis' # noqa: N815
+ methodData: ReliabilityMethod # noqa: N815
ModuleDTO = Annotated[
diff --git a/modules/performUQ/UQpy/src/quofemDTOs.py b/modules/performUQ/UQpy/src/quofemDTOs.py
index 95cdf716c..60243b896 100644
--- a/modules/performUQ/UQpy/src/quofemDTOs.py
+++ b/modules/performUQ/UQpy/src/quofemDTOs.py
@@ -1,72 +1,72 @@
-from __future__ import annotations
+from __future__ import annotations # noqa: INP001, D100
from typing import Any, Dict, List
from pydantic import BaseModel
-from .distributions.UniformDTOs import DistributionDTO
-from .modules.ModuleDTOs import ModuleDTO
-from .sampling.mcmc.StretchDto import StretchDto
+from .distributions.UniformDTOs import DistributionDTO # noqa: TCH001
+from .modules.ModuleDTOs import ModuleDTO # noqa: TCH001
+from .sampling.mcmc.StretchDto import StretchDto # noqa: TCH001
-class ApplicationData(BaseModel):
+class ApplicationData(BaseModel): # noqa: D101
MS_Path: str
- mainScript: str
- postprocessScript: str
+ mainScript: str # noqa: N815
+ postprocessScript: str # noqa: N815
-class FEM(BaseModel):
+class FEM(BaseModel): # noqa: D101
Application: str
ApplicationData: ApplicationData
-class UQ(BaseModel):
+class UQ(BaseModel): # noqa: D101
Application: str
- ApplicationData: Dict[str, Any]
+ ApplicationData: Dict[str, Any] # noqa: UP006
-class Applications(BaseModel):
+class Applications(BaseModel): # noqa: D101
FEM: FEM
UQ: UQ
-class EDPItem(BaseModel):
+class EDPItem(BaseModel): # noqa: D101
length: int
name: str
type: str
-class SubsetSimulationData(BaseModel):
- conditionalProbability: float
- failureThreshold: int
- maxLevels: int
- mcmcMethodData: StretchDto
+class SubsetSimulationData(BaseModel): # noqa: D101
+ conditionalProbability: float # noqa: N815
+ failureThreshold: int # noqa: N815
+ maxLevels: int # noqa: N815
+ mcmcMethodData: StretchDto # noqa: N815
-class ReliabilityMethodData(BaseModel):
+class ReliabilityMethodData(BaseModel): # noqa: D101
method: str
- subsetSimulationData: SubsetSimulationData
+ subsetSimulationData: SubsetSimulationData # noqa: N815
-class RandomVariable(BaseModel):
+class RandomVariable(BaseModel): # noqa: D101
distribution: str
- inputType: str
+ inputType: str # noqa: N815
lowerbound: int
name: str
- refCount: int
+ refCount: int # noqa: N815
upperbound: int
value: str
- variableClass: str
+ variableClass: str # noqa: N815
-class Model(BaseModel):
+class Model(BaseModel): # noqa: D101
Applications: Applications
- EDP: List[EDPItem]
- FEM: Dict[str, Any]
+ EDP: List[EDPItem] # noqa: UP006
+ FEM: Dict[str, Any] # noqa: UP006
UQ: ModuleDTO
# correlationMatrix: List[int]
- localAppDir: str
- randomVariables: List[DistributionDTO]
- remoteAppDir: str
- runType: str
- workingDir: str
+ localAppDir: str # noqa: N815
+ randomVariables: List[DistributionDTO] # noqa: N815, UP006
+ remoteAppDir: str # noqa: N815
+ runType: str # noqa: N815
+ workingDir: str # noqa: N815
diff --git a/modules/performUQ/UQpy/src/reliability/ReliabilityMethodsDTOs.py b/modules/performUQ/UQpy/src/reliability/ReliabilityMethodsDTOs.py
index b444e554a..083617df3 100644
--- a/modules/performUQ/UQpy/src/reliability/ReliabilityMethodsDTOs.py
+++ b/modules/performUQ/UQpy/src/reliability/ReliabilityMethodsDTOs.py
@@ -1,4 +1,4 @@
-from typing import Literal, Union
+from typing import Literal, Union # noqa: INP001, D100
from pydantic import Field
from src.sampling.mcmc.StretchDto import SamplingMethod
@@ -6,22 +6,22 @@
from typing_extensions import Annotated
-class ReliabilityMethodBaseDTO(UQpyDTO):
+class ReliabilityMethodBaseDTO(UQpyDTO): # noqa: D101
pass
-class SubsetSimulationDTO(ReliabilityMethodBaseDTO):
+class SubsetSimulationDTO(ReliabilityMethodBaseDTO): # noqa: D101
method: Literal['Subset Simulation'] = 'Subset Simulation'
- conditionalProbability: float
+ conditionalProbability: float # noqa: N815
failure_threshold: float = Field(..., alias='failureThreshold')
- maxLevels: int
+ maxLevels: int # noqa: N815
samples_per_subset: int
- samplingMethod: SamplingMethod
+ samplingMethod: SamplingMethod # noqa: N815
# def __post_init__(self):
# self.samplingMethod.n_chains=int(self.samples_per_subset*self.conditionalProbability)
- def init_to_text(self):
+ def init_to_text(self): # noqa: D102
from UQpy.reliability.SubsetSimulation import (
SubsetSimulation,
)
@@ -83,7 +83,7 @@ def init_to_text(self):
'\tfile.write(json.dumps(output_data))\n'
)
- prerequisite_str = '\n'.join(
+ prerequisite_str = '\n'.join( # noqa: FLY002
[
initial_sampler,
import_statement,
@@ -109,15 +109,15 @@ def __create_postprocess_script(self, results_filename: str = 'results.out'):
'\t\telse:',
f'\t\t\treturn {self.failure_threshold} - res',
'\telse:',
- "\t\traise ValueError(f'Result not found in results.out file for sample evaluation "
+ "\t\traise ValueError(f'Result not found in results.out file for sample evaluation " # noqa: ISC003
+ "{index}')",
]
- with open('postprocess_script.py', 'w') as f:
+ with open('postprocess_script.py', 'w') as f: # noqa: PTH123
f.write('\n'.join(postprocess_script_code))
-class FormDTO(ReliabilityMethodBaseDTO):
+class FormDTO(ReliabilityMethodBaseDTO): # noqa: D101
method: Literal['FORM'] = 'FORM'
diff --git a/modules/performUQ/UQpy/src/runmodel/RunModelDTOs.py b/modules/performUQ/UQpy/src/runmodel/RunModelDTOs.py
index 70fffdccb..a31543819 100644
--- a/modules/performUQ/UQpy/src/runmodel/RunModelDTOs.py
+++ b/modules/performUQ/UQpy/src/runmodel/RunModelDTOs.py
@@ -1,13 +1,13 @@
-from pathlib import Path
+from pathlib import Path # noqa: INP001, D100
from typing import List
from src.quofemDTOs import RandomVariable
-class RunModelDTO:
+class RunModelDTO: # noqa: D101
@staticmethod
- def create_runmodel_with_variables_driver(
- variables: List[RandomVariable],
+ def create_runmodel_with_variables_driver( # noqa: D102
+ variables: List[RandomVariable], # noqa: FA100
driver_filename: str = 'driver',
):
RunModelDTO.__create_runmodel_input_teplate(variables)
@@ -15,9 +15,9 @@ def create_runmodel_with_variables_driver(
RunModelDTO.__create_postprocess_script()
# Validate file paths
- input_template = Path('params_template.in')
- model_script = Path('model_script.py')
- output_script = Path('postprocess_script.py')
+ input_template = Path('params_template.in') # noqa: F841
+ model_script = Path('model_script.py') # noqa: F841
+ output_script = Path('postprocess_script.py') # noqa: F841
var_names = [f'{rv.name}' for rv in variables]
run_model_code = [
@@ -30,12 +30,12 @@ def create_runmodel_with_variables_driver(
return '\n'.join(run_model_code)
@staticmethod
- def __create_runmodel_input_teplate(variables: List[RandomVariable]):
+ def __create_runmodel_input_teplate(variables: List[RandomVariable]): # noqa: FA100
template_code = [f'{len(variables)}']
for rv in variables:
- template_code.append(f'{rv.name} <{rv.name}>')
+ template_code.append(f'{rv.name} <{rv.name}>') # noqa: PERF401
- with open('params_template.in', 'w') as f:
+ with open('params_template.in', 'w') as f: # noqa: PTH123
f.write('\n'.join(template_code))
@staticmethod
@@ -47,7 +47,7 @@ def __create_model_script(driver_filename):
'import subprocess',
'import fire\n',
'def model(sample_index: int) -> None:',
- f"\tcommand1 = f'mv ./InputFiles/{template_file_base}_"
+ f"\tcommand1 = f'mv ./InputFiles/{template_file_base}_" # noqa: ISC003
+ '{sample_index}'
+ f"{template_file_suffix} ./params.in'",
f"\tcommand2 = './{driver_filename}'\n",
@@ -57,7 +57,7 @@ def __create_model_script(driver_filename):
'\tfire.Fire(model)',
]
- with open('model_script.py', 'w') as f:
+ with open('model_script.py', 'w') as f: # noqa: PTH123
f.write('\n'.join(model_script_code))
@staticmethod
@@ -76,9 +76,9 @@ def __create_postprocess_script(results_filename: str = 'results.out'):
'\t\telse:',
'\t\t\treturn res',
'\telse:',
- "\t\traise ValueError(f'Result not found in results.out file for sample evaluation "
+ "\t\traise ValueError(f'Result not found in results.out file for sample evaluation " # noqa: ISC003
+ "{index}')",
]
- with open('postprocess_script.py', 'w') as f:
+ with open('postprocess_script.py', 'w') as f: # noqa: PTH123
f.write('\n'.join(postprocess_script_code))
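# Hedged sketch of the params_template.in produced above (names here are
# hypothetical): a count line, then 'name <name>' placeholders that UQpy's
# RunModel substitutes per sample.
names = ['E', 'fy']
template = '\n'.join([f'{len(names)}'] + [f'{n} <{n}>' for n in names])
print(template)
# 2
# E <E>
# fy <fy>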
diff --git a/modules/performUQ/UQpy/src/sampling/mcmc/ModifiedMetropolisHastingsDto.py b/modules/performUQ/UQpy/src/sampling/mcmc/ModifiedMetropolisHastingsDto.py
index ad3a5f5a9..9ad0e269b 100644
--- a/modules/performUQ/UQpy/src/sampling/mcmc/ModifiedMetropolisHastingsDto.py
+++ b/modules/performUQ/UQpy/src/sampling/mcmc/ModifiedMetropolisHastingsDto.py
@@ -1,10 +1,10 @@
-from typing import Literal
+from typing import Literal # noqa: INP001, D100
from pydantic import Field
from src.UQpyDTO import UQpyDTO
-class ModifiedMetropolisHastingsDto(UQpyDTO):
+class ModifiedMetropolisHastingsDto(UQpyDTO): # noqa: D101
method: Literal['Modified Metropolis Hastings'] = 'Modified Metropolis Hastings'
burn_length: int = Field(default=0, alias='burn-in', ge=0)
jump: int = Field(default=1, ge=0)
@@ -15,7 +15,7 @@ class ModifiedMetropolisHastingsDto(UQpyDTO):
concatenate_chains = True
proposal_is_symmetric = False
- def init_to_text(self):
+ def init_to_text(self): # noqa: D102
from UQpy.sampling.mcmc.ModifiedMetropolisHastings import (
ModifiedMetropolisHastings,
)
diff --git a/modules/performUQ/UQpy/src/sampling/mcmc/StretchDto.py b/modules/performUQ/UQpy/src/sampling/mcmc/StretchDto.py
index da6fefe18..be19e99c4 100644
--- a/modules/performUQ/UQpy/src/sampling/mcmc/StretchDto.py
+++ b/modules/performUQ/UQpy/src/sampling/mcmc/StretchDto.py
@@ -1,4 +1,4 @@
-from __future__ import annotations
+from __future__ import annotations # noqa: INP001, D100
from typing import Literal, Union
@@ -9,7 +9,7 @@
from src.UQpyDTO import UQpyDTO
-class StretchDto(UQpyDTO):
+class StretchDto(UQpyDTO): # noqa: D101
method: Literal['Stretch'] = 'Stretch'
burn_length: int = Field(default=0, alias='burn-in', ge=0)
jump: int = Field(default=1, ge=0)
@@ -18,7 +18,7 @@ class StretchDto(UQpyDTO):
random_state: int = Field(..., alias='randomState')
scale: float = Field(..., gt=0)
- def init_to_text(self):
+ def init_to_text(self): # noqa: D102
from UQpy.sampling.mcmc.Stretch import Stretch
c = Stretch
diff --git a/modules/performUQ/common/ERAClasses/ERACond.py b/modules/performUQ/common/ERAClasses/ERACond.py
index 974307bdb..1ce36b6a1 100644
--- a/modules/performUQ/common/ERAClasses/ERACond.py
+++ b/modules/performUQ/common/ERAClasses/ERACond.py
@@ -1,4 +1,4 @@
-# import of modules
+# import of modules # noqa: INP001, D100
import types
import numpy as np
@@ -45,7 +45,7 @@
References:
1. Documentation of the ERA Distribution Classes
---------------------------------------------------------------------------
-"""
+""" # noqa: W291
# %%
@@ -124,34 +124,34 @@ class ERACond:
Uniform: Obj = ERADist('uniform','MOM',lambda ... :[mean,std])
Weibull: Obj = ERADist('weibull','MOM',lambda ... :[mean,std])
- """
+ """ # noqa: D205
- def __init__(self, name, opt, param, ID=False):
+ def __init__(self, name, opt, param, ID=False): # noqa: FBT002, N803
"""Constructor method, for more details have a look at the
class description.
- """
+ """ # noqa: D205, D401
self.Name = name.lower()
if opt.upper() == 'PAR' or opt.upper() == 'MOM':
self.Opt = opt.upper()
else:
- raise RuntimeError(
- 'Conditional distributions can only be defined '
+ raise RuntimeError( # noqa: TRY003
+ 'Conditional distributions can only be defined ' # noqa: EM101
"by moments (opt = 'MOM') or by parameters (opt = 'PAR')."
)
self.ID = ID
# check if param is a lambda function
- if type(param) == types.LambdaType:
+ if type(param) == types.LambdaType: # noqa: E721
self.Param = param
else:
- raise RuntimeError('The input param must be a lambda function.')
+ raise RuntimeError('The input param must be a lambda function.') # noqa: EM101, TRY003
self.modParam = param
# %%
- def condParam(self, cond):
+ def condParam(self, cond): # noqa: C901, N802, PLR0912, PLR0915
"""Evaluates the parameters of the distribution for the
different given conditions.
In case that the distribution is described by its moments,
@@ -159,7 +159,7 @@ def condParam(self, cond):
parameters.
This method is used by the ERACond methods condCDF, condPDF,
condiCDF and condRandom.
- """
+ """ # noqa: D205, D401
cond = np.array(cond, ndmin=2, dtype=float).T
par = self.modParam(cond)
n_cond = np.shape(cond)[0]
@@ -168,46 +168,46 @@ def condParam(self, cond):
# for the case of Opt == PAR
if self.Opt == 'PAR':
if self.Name == 'beta':
- Par = [par[0], par[1], par[2], par[3] - par[2]]
+ Par = [par[0], par[1], par[2], par[3] - par[2]] # noqa: N806
elif self.Name == 'binomial':
- Par = [par[0].astype(int), par[1]]
+ Par = [par[0].astype(int), par[1]] # noqa: N806
elif self.Name == 'chisquare':
- Par = np.around(par, 0)
+ Par = np.around(par, 0) # noqa: N806
elif self.Name == 'exponential':
- Par = 1 / par
+ Par = 1 / par # noqa: N806
elif self.Name == 'frechet':
- Par = [-1 / par[1], par[0] / par[1], par[0]]
+ Par = [-1 / par[1], par[0] / par[1], par[0]] # noqa: N806
elif self.Name == 'gamma':
- Par = [par[1], 1 / par[0]]
+ Par = [par[1], 1 / par[0]] # noqa: N806
elif self.Name == 'geometric':
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'gev':
- Par = [-par[0], par[1], par[2]]
+ Par = [-par[0], par[1], par[2]] # noqa: N806
elif self.Name == 'gevmin':
- Par = [-par[0], par[1], -par[2]]
- elif self.Name == 'gumbel' or self.Name == 'gumbelmin':
- Par = par
+ Par = [-par[0], par[1], -par[2]] # noqa: N806
+ elif self.Name == 'gumbel' or self.Name == 'gumbelmin': # noqa: PLR1714
+ Par = par # noqa: N806
elif self.Name == 'lognormal':
- Par = [par[1], np.exp(par[0])]
- elif self.Name == 'negativebinomial' or self.Name == 'normal':
- Par = par
+ Par = [par[1], np.exp(par[0])] # noqa: N806
+ elif self.Name == 'negativebinomial' or self.Name == 'normal': # noqa: PLR1714
+ Par = par # noqa: N806
elif self.Name == 'pareto':
- Par = [1 / par[1], par[0] / par[1], par[0]]
+ Par = [1 / par[1], par[0] / par[1], par[0]] # noqa: N806
elif self.Name == 'poisson':
if isinstance(par, list):
- Par = par[0] * par[1]
+ Par = par[0] * par[1] # noqa: N806
else:
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'rayleigh':
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'truncatednormal':
a = (par[2] - par[0]) / par[1]
b = (par[3] - par[0]) / par[1]
- Par = [par[0], par[1], a, b]
+ Par = [par[0], par[1], a, b] # noqa: N806
elif self.Name == 'uniform':
- Par = [par[0], par[1] - par[0]]
+ Par = [par[0], par[1] - par[0]] # noqa: N806
elif self.Name == 'weibull':
- Par = par
+ Par = par # noqa: N806
# ----------------------------------------------------------------------------
# for the case of Opt == MOM
@@ -218,15 +218,15 @@ def condParam(self, cond):
/ (par[3] - par[2])
)
s = r * (par[3] - par[0]) / (par[0] - par[2])
- Par = [r, s, par[2], par[3] - par[2]]
+ Par = [r, s, par[2], par[3] - par[2]] # noqa: N806
elif self.Name == 'binomial':
p = 1 - (par[1]) ** 2 / par[0]
n = par[0] / p
- Par = [n.astype(int), p]
+ Par = [n.astype(int), p] # noqa: N806
elif self.Name == 'chisquare':
- Par = np.around(par, 0)
+ Par = np.around(par, 0) # noqa: N806
elif self.Name == 'exponential':
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'frechet':
c = np.zeros(n_cond)
scale = np.zeros(n_cond)
@@ -241,7 +241,7 @@ def equation(param):
- special.gamma(1 - 1 / param) ** 2
)
/ special.gamma(1 - 1 / param)
- - par[1][i] / par[0][i]
+ - par[1][i] / par[0][i] # noqa: B023
)
sol = optimize.fsolve(equation, x0=param0, full_output=True)
@@ -255,11 +255,11 @@ def equation(param):
c[i] = np.nan
scale[i] = np.nan
loc[i] = np.nan
- Par = [c, scale, loc]
+ Par = [c, scale, loc] # noqa: N806
elif self.Name == 'gamma':
- Par = [(par[0] / par[1]) ** 2, par[1] ** 2 / par[0]]
+ Par = [(par[0] / par[1]) ** 2, par[1] ** 2 / par[0]] # noqa: N806
elif self.Name == 'geometric':
- Par = 1 / par
+ Par = 1 / par # noqa: N806
elif self.Name == 'gev':
beta = par[2]
alpha = (
@@ -268,7 +268,7 @@ def equation(param):
/ np.sqrt(special.gamma(1 - 2 * beta) - special.gamma(1 - beta) ** 2)
)
epsilon = par[0] - (alpha / beta * (special.gamma(1 - beta) - 1))
- Par = [-beta, alpha, epsilon]
+ Par = [-beta, alpha, epsilon] # noqa: N806
elif self.Name == 'gevmin':
beta = par[2]
alpha = (
@@ -277,36 +277,36 @@ def equation(param):
/ np.sqrt(special.gamma(1 - 2 * beta) - special.gamma(1 - beta) ** 2)
)
epsilon = par[0] + (alpha / beta * (special.gamma(1 - beta) - 1))
- Par = [-beta, alpha, -epsilon]
+ Par = [-beta, alpha, -epsilon] # noqa: N806
elif self.Name == 'gumbel':
a_n = par[1] * np.sqrt(6) / np.pi
b_n = par[0] - np.euler_gamma * a_n
- Par = [a_n, b_n]
+ Par = [a_n, b_n] # noqa: N806
elif self.Name == 'gumbelmin':
a_n = par[1] * np.sqrt(6) / np.pi
b_n = par[0] + np.euler_gamma * a_n
- Par = [a_n, b_n]
+ Par = [a_n, b_n] # noqa: N806
elif self.Name == 'lognormal':
mu_lnx = np.log(par[0] ** 2 / np.sqrt(par[1] ** 2 + par[0] ** 2))
sig_lnx = np.sqrt(np.log(1 + (par[1] / par[0]) ** 2))
- Par = [sig_lnx, np.exp(mu_lnx)]
+ Par = [sig_lnx, np.exp(mu_lnx)] # noqa: N806
elif self.Name == 'negativebinomial':
p = par[0] / (par[0] + par[1] ** 2)
k = par[0] * p
- Par = [k, p]
+ Par = [k, p] # noqa: N806
elif self.Name == 'normal':
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'pareto':
alpha = 1 + np.sqrt(1 + (par[0] / par[1]) ** 2)
x_m = par[0] * (alpha - 1) / alpha
- Par = [1 / alpha, x_m / alpha, x_m]
+ Par = [1 / alpha, x_m / alpha, x_m] # noqa: N806
elif self.Name == 'poisson':
if isinstance(par, list):
- Par = par[0]
+ Par = par[0] # noqa: N806
else:
- Par = par
+ Par = par # noqa: N806
elif self.Name == 'rayleigh':
- Par = par / np.sqrt(np.pi / 2)
+ Par = par / np.sqrt(np.pi / 2) # noqa: N806
elif self.Name == 'truncatednormal':
mu = np.zeros(n_cond)
sig = np.zeros(n_cond)
@@ -323,28 +323,28 @@ def equation(param):
continue
def equation(param):
- f = lambda x: stats.norm.pdf(x, param[0], param[1]) / (
- stats.norm.cdf(b[i], param[0], param[1])
- - stats.norm.cdf(a[i], param[0], param[1])
+ f = lambda x: stats.norm.pdf(x, param[0], param[1]) / ( # noqa: E731
+ stats.norm.cdf(b[i], param[0], param[1]) # noqa: B023
+ - stats.norm.cdf(a[i], param[0], param[1]) # noqa: B023
)
expec_eq = (
- integrate.quadrature(lambda x: x * f(x), a[i], b[i])[0]
- - mean
+ integrate.quadrature(lambda x: x * f(x), a[i], b[i])[0] # noqa: B023
+ - mean # noqa: B023
)
std_eq = (
np.sqrt(
- integrate.quadrature(lambda x: x**2 * f(x), a[i], b[i])[
+ integrate.quadrature(lambda x: x**2 * f(x), a[i], b[i])[ # noqa: B023
0
]
- - (integrate.quadrature(lambda x: x * f(x), a[i], b[i]))[
+ - (integrate.quadrature(lambda x: x * f(x), a[i], b[i]))[ # noqa: B023
0
]
** 2
)
- - std
+ - std # noqa: B023
)
eq = [expec_eq, std_eq]
- return eq
+ return eq # noqa: RET504
x0 = [mean, std]
sol = optimize.fsolve(equation, x0=x0, full_output=True)
@@ -356,11 +356,11 @@ def equation(param):
b[i] = np.nan
mu[i] = np.nan
sig[i] = np.nan
- Par = [mu, sig, (a - mu) / sig, (b - mu) / sig]
+ Par = [mu, sig, (a - mu) / sig, (b - mu) / sig] # noqa: N806
elif self.Name == 'uniform':
lower = par[0] - np.sqrt(12) * par[1] / 2
upper = par[0] + np.sqrt(12) * par[1] / 2
- Par = [lower, upper - lower]
+ Par = [lower, upper - lower] # noqa: N806
elif self.Name == 'weibull':
a_n = np.zeros(n_cond)
k = np.zeros(n_cond)
@@ -373,7 +373,7 @@ def equation(param):
- (special.gamma(1 + 1 / param)) ** 2
)
/ special.gamma(1 + 1 / param)
- - par[1][i] / par[0][i]
+ - par[1][i] / par[0][i] # noqa: B023
)
sol = optimize.fsolve(equation, x0=0.02, full_output=True)
@@ -383,7 +383,7 @@ def equation(param):
else:
k[i] = np.nan
a_n[i] = np.nan
- Par = [a_n, k]
+ Par = [a_n, k] # noqa: N806
for i in range(len(Par)):
Par[i] = np.squeeze(Par[i])
@@ -391,215 +391,215 @@ def equation(param):
return Par
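# Worked check of the lognormal moment conversion used in condParam above:
# scipy's lognorm(s=sig_lnx, scale=exp(mu_lnx)) reproduces the target mean
# and standard deviation.
import numpy as np
from scipy import stats

mean, std = 10.0, 3.0
mu_lnx = np.log(mean**2 / np.sqrt(std**2 + mean**2))
sig_lnx = np.sqrt(np.log(1 + (std / mean) ** 2))
m, v = stats.lognorm.stats(s=sig_lnx, scale=np.exp(mu_lnx), moments='mv')
assert np.isclose(m, mean) and np.isclose(np.sqrt(v), std)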
# %%
- def condCDF(self, x, cond):
+ def condCDF(self, x, cond): # noqa: C901, N802
"""Evaluates the CDF of the conditional distribution at x for
the given conditions.
This method is used by the ERARosen method X2U.
- """
+ """ # noqa: D205, D401
par = self.condParam(cond) # computation of the conditional parameters
x = np.array(x, ndmin=1, dtype=float)
if self.Name == 'beta':
- CDF = stats.beta.cdf(x, a=par[0], b=par[1], loc=par[2], scale=par[3])
+ CDF = stats.beta.cdf(x, a=par[0], b=par[1], loc=par[2], scale=par[3]) # noqa: N806
elif self.Name == 'binomial':
- CDF = stats.binom.cdf(x, n=par[0], p=par[1])
+ CDF = stats.binom.cdf(x, n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'chisquare':
- CDF = stats.chi2.cdf(x, df=par)
+ CDF = stats.chi2.cdf(x, df=par) # noqa: N806
elif self.Name == 'exponential':
- CDF = stats.expon.cdf(x, scale=par)
+ CDF = stats.expon.cdf(x, scale=par) # noqa: N806
elif self.Name == 'frechet':
- CDF = stats.genextreme.cdf(x, c=par[0], scale=par[1], loc=par[2])
+ CDF = stats.genextreme.cdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gamma':
- CDF = stats.gamma.cdf(x, a=par[0], scale=par[1])
+ CDF = stats.gamma.cdf(x, a=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'geometric':
- CDF = stats.geom.cdf(x, p=par)
+ CDF = stats.geom.cdf(x, p=par) # noqa: N806
elif self.Name == 'gev':
- CDF = stats.genextreme.cdf(x, c=par[0], scale=par[1], loc=par[2])
+ CDF = stats.genextreme.cdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gevmin':
- CDF = 1 - stats.genextreme.cdf(-x, c=par[0], scale=par[1], loc=par[2])
+ CDF = 1 - stats.genextreme.cdf(-x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gumbel':
- CDF = stats.gumbel_r.cdf(x, scale=par[0], loc=par[1])
+ CDF = stats.gumbel_r.cdf(x, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'gumbelmin':
- CDF = stats.gumbel_l.cdf(x, scale=par[0], loc=par[1])
+ CDF = stats.gumbel_l.cdf(x, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'lognormal':
- CDF = stats.lognorm.cdf(x, s=par[0], scale=par[1])
+ CDF = stats.lognorm.cdf(x, s=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'negativebinomial':
- CDF = stats.nbinom.cdf(x - par[0], n=par[0], p=par[1])
+ CDF = stats.nbinom.cdf(x - par[0], n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'normal':
- CDF = stats.norm.cdf(x, loc=par[0], scale=par[1])
+ CDF = stats.norm.cdf(x, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'pareto':
- CDF = stats.genpareto.cdf(x, c=par[0], scale=par[1], loc=par[2])
+ CDF = stats.genpareto.cdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'poisson':
- CDF = stats.poisson.cdf(x, mu=par)
+ CDF = stats.poisson.cdf(x, mu=par) # noqa: N806
elif self.Name == 'rayleigh':
- CDF = stats.rayleigh.cdf(x, scale=par)
+ CDF = stats.rayleigh.cdf(x, scale=par) # noqa: N806
elif self.Name == 'truncatednormal':
- CDF = stats.truncnorm.cdf(
+ CDF = stats.truncnorm.cdf( # noqa: N806
x, loc=par[0], scale=par[1], a=par[2], b=par[3]
)
elif self.Name == 'uniform':
- CDF = stats.uniform.cdf(x, loc=par[0], scale=par[1])
+ CDF = stats.uniform.cdf(x, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'weibull':
- CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0])
+ CDF = stats.weibull_min.cdf(x, c=par[1], scale=par[0]) # noqa: N806
return CDF
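# Hedged usage sketch of the dispatch above (assumes ERACond from this
# module is in scope): a conditional normal whose mean tracks the
# conditioning value, defined by a lambda as the class docstring describes.
cond_dist = ERACond('normal', 'PAR', lambda c: [c, 1.0])
print(cond_dist.condCDF(x=[0.0], cond=[0.0]))  # 0.5 at the mean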
# %%
- def condiCDF(self, y, cond):
+ def condiCDF(self, y, cond): # noqa: C901, N802
"""Evaluates the inverse CDF of the conditional distribution at
y for the given conditions.
This method is used by the ERARosen method U2X.
- """
+ """ # noqa: D205, D401
par = self.condParam(cond) # computation of the conditional parameters
y = np.array(y, ndmin=1, dtype=float)
if self.Name == 'beta':
- iCDF = stats.beta.ppf(y, a=par[0], b=par[1], loc=par[2], scale=par[3])
+ iCDF = stats.beta.ppf(y, a=par[0], b=par[1], loc=par[2], scale=par[3]) # noqa: N806
elif self.Name == 'binomial':
- iCDF = stats.binom.ppf(y, n=par[0], p=par[1])
+ iCDF = stats.binom.ppf(y, n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'chisquare':
- iCDF = stats.chi2.ppf(y, df=par)
+ iCDF = stats.chi2.ppf(y, df=par) # noqa: N806
elif self.Name == 'exponential':
- iCDF = stats.expon.ppf(y, scale=par)
+ iCDF = stats.expon.ppf(y, scale=par) # noqa: N806
elif self.Name == 'frechet':
- iCDF = stats.genextreme.ppf(y, c=par[0], scale=par[1], loc=par[2])
+ iCDF = stats.genextreme.ppf(y, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gamma':
- iCDF = stats.gamma.ppf(y, a=par[0], scale=par[1])
+ iCDF = stats.gamma.ppf(y, a=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'geometric':
- iCDF = stats.geom.ppf(y, p=par)
+ iCDF = stats.geom.ppf(y, p=par) # noqa: N806
elif self.Name == 'gev':
- iCDF = stats.genextreme.ppf(y, c=par[0], scale=par[1], loc=par[2])
+ iCDF = stats.genextreme.ppf(y, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gevmin':
- iCDF = -stats.genextreme.ppf(1 - y, c=par[0], scale=par[1], loc=par[2])
+ iCDF = -stats.genextreme.ppf(1 - y, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gumbel':
- iCDF = stats.gumbel_r.ppf(y, scale=par[0], loc=par[1])
+ iCDF = stats.gumbel_r.ppf(y, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'gumbelmin':
- iCDF = stats.gumbel_l.ppf(y, scale=par[0], loc=par[1])
+ iCDF = stats.gumbel_l.ppf(y, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'lognormal':
- iCDF = stats.lognorm.ppf(y, s=par[0], scale=par[1])
+ iCDF = stats.lognorm.ppf(y, s=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'negativebinomial':
- iCDF = stats.nbinom.ppf(y, n=par[0], p=par[1]) + par[0]
+ iCDF = stats.nbinom.ppf(y, n=par[0], p=par[1]) + par[0] # noqa: N806
elif self.Name == 'normal':
- iCDF = stats.norm.ppf(y, loc=par[0], scale=par[1])
+ iCDF = stats.norm.ppf(y, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'pareto':
- iCDF = stats.genpareto.ppf(y, c=par[0], scale=par[1], loc=par[2])
+ iCDF = stats.genpareto.ppf(y, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'poisson':
- iCDF = stats.poisson.ppf(y, mu=par)
+ iCDF = stats.poisson.ppf(y, mu=par) # noqa: N806
elif self.Name == 'rayleigh':
- iCDF = stats.rayleigh.ppf(y, scale=par)
+ iCDF = stats.rayleigh.ppf(y, scale=par) # noqa: N806
elif self.Name == 'truncatednormal':
- iCDF = stats.truncnorm.ppf(
+ iCDF = stats.truncnorm.ppf( # noqa: N806
y, loc=par[0], scale=par[1], a=par[2], b=par[3]
)
elif self.Name == 'uniform':
- iCDF = stats.uniform.ppf(y, loc=par[0], scale=par[1])
+ iCDF = stats.uniform.ppf(y, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'weibull':
- iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0])
+ iCDF = stats.weibull_min.ppf(y, c=par[1], scale=par[0]) # noqa: N806
return iCDF
# %%
- def condPDF(self, x, cond):
+ def condPDF(self, x, cond): # noqa: C901, N802
"""Evaluates the PDF of the conditional distribution at x for
the given conditions.
This method is used by the ERARosen method pdf.
- """
+ """ # noqa: D205, D401
par = self.condParam(cond) # computation of the conditional parameters
x = np.array(x, ndmin=1, dtype=float)
if self.Name == 'beta':
- PDF = stats.beta.pdf(x, a=par[0], b=par[1], loc=par[2], scale=par[3])
+ PDF = stats.beta.pdf(x, a=par[0], b=par[1], loc=par[2], scale=par[3]) # noqa: N806
elif self.Name == 'binomial':
- PDF = stats.binom.pmf(x, n=par[0], p=par[1])
+ PDF = stats.binom.pmf(x, n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'chisquare':
- PDF = stats.chi2.pdf(x, df=par)
+ PDF = stats.chi2.pdf(x, df=par) # noqa: N806
elif self.Name == 'exponential':
- PDF = stats.expon.pdf(x, scale=par)
+ PDF = stats.expon.pdf(x, scale=par) # noqa: N806
elif self.Name == 'frechet':
- PDF = stats.genextreme.pdf(x, c=par[0], scale=par[1], loc=par[2])
+ PDF = stats.genextreme.pdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gamma':
- PDF = stats.gamma.pdf(x, a=par[0], scale=par[1])
+ PDF = stats.gamma.pdf(x, a=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'geometric':
- PDF = stats.geom.pmf(x, p=par)
+ PDF = stats.geom.pmf(x, p=par) # noqa: N806
elif self.Name == 'gev':
- PDF = stats.genextreme.pdf(x, c=par[0], scale=par[1], loc=par[2])
+ PDF = stats.genextreme.pdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gevmin':
- PDF = stats.genextreme.pdf(-x, c=par[0], scale=par[1], loc=par[2])
+ PDF = stats.genextreme.pdf(-x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gumbel':
- PDF = stats.gumbel_r.pdf(x, scale=par[0], loc=par[1])
+ PDF = stats.gumbel_r.pdf(x, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'gumbelmin':
- PDF = stats.gumbel_l.pdf(x, scale=par[0], loc=par[1])
+ PDF = stats.gumbel_l.pdf(x, scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'lognormal':
- PDF = stats.lognorm.pdf(x, s=par[0], scale=par[1])
+ PDF = stats.lognorm.pdf(x, s=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'negativebinomial':
- PDF = stats.nbinom.pmf(x - par[0], n=par[0], p=par[1])
+ PDF = stats.nbinom.pmf(x - par[0], n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'normal':
- PDF = stats.norm.pdf(x, loc=par[0], scale=par[1])
+ PDF = stats.norm.pdf(x, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'pareto':
- PDF = stats.genpareto.pdf(x, c=par[0], scale=par[1], loc=par[2])
+ PDF = stats.genpareto.pdf(x, c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'poisson':
- PDF = stats.poisson.pmf(x, mu=par)
+ PDF = stats.poisson.pmf(x, mu=par) # noqa: N806
elif self.Name == 'rayleigh':
- PDF = stats.rayleigh.pdf(x, scale=par)
+ PDF = stats.rayleigh.pdf(x, scale=par) # noqa: N806
elif self.Name == 'truncatednormal':
- PDF = stats.truncnorm.pdf(
+ PDF = stats.truncnorm.pdf( # noqa: N806
x, loc=par[0], scale=par[1], a=par[2], b=par[3]
)
elif self.Name == 'uniform':
- PDF = stats.uniform.pdf(x, loc=par[0], scale=par[1])
+ PDF = stats.uniform.pdf(x, loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'weibull':
- PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0])
+ PDF = stats.weibull_min.pdf(x, c=par[1], scale=par[0]) # noqa: N806
return PDF
# %%
- def condRandom(self, cond):
+ def condRandom(self, cond): # noqa: C901, N802
"""Creates one random sample for each given condition.
This method is used by the ERARosen method random.
- """
+ """ # noqa: D205, D401
par = self.condParam(cond) # computation of the conditional parameters
if self.Name == 'beta':
- Random = stats.beta.rvs(a=par[0], b=par[1], loc=par[2], scale=par[3])
+ Random = stats.beta.rvs(a=par[0], b=par[1], loc=par[2], scale=par[3]) # noqa: N806
elif self.Name == 'binomial':
- Random = stats.binom.rvs(n=par[0], p=par[1])
+ Random = stats.binom.rvs(n=par[0], p=par[1]) # noqa: N806
elif self.Name == 'chisquare':
- Random = stats.chi2.rvs(df=par)
+ Random = stats.chi2.rvs(df=par) # noqa: N806
elif self.Name == 'exponential':
- Random = stats.expon.rvs(scale=par)
+ Random = stats.expon.rvs(scale=par) # noqa: N806
elif self.Name == 'frechet':
- Random = stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2])
+ Random = stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gamma':
- Random = stats.gamma.rvs(a=par[0], scale=par[1])
+ Random = stats.gamma.rvs(a=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'geometric':
- Random = stats.geom.rvs(p=par)
+ Random = stats.geom.rvs(p=par) # noqa: N806
elif self.Name == 'gev':
- Random = stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2])
+ Random = stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gevmin':
- Random = -stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2])
+ Random = -stats.genextreme.rvs(c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'gumbel':
- Random = stats.gumbel_r.rvs(scale=par[0], loc=par[1])
+ Random = stats.gumbel_r.rvs(scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'gumbelmin':
- Random = stats.gumbel_l.rvs(scale=par[0], loc=par[1])
+ Random = stats.gumbel_l.rvs(scale=par[0], loc=par[1]) # noqa: N806
elif self.Name == 'lognormal':
- Random = stats.lognorm.rvs(s=par[0], scale=par[1])
+ Random = stats.lognorm.rvs(s=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'negativebinomial':
- Random = stats.nbinom.rvs(n=par[0], p=par[1]) + par[0]
+ Random = stats.nbinom.rvs(n=par[0], p=par[1]) + par[0] # noqa: N806
elif self.Name == 'normal':
- Random = stats.norm.rvs(loc=par[0], scale=par[1])
+ Random = stats.norm.rvs(loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'pareto':
- Random = stats.genpareto.rvs(c=par[0], scale=par[1], loc=par[2])
+ Random = stats.genpareto.rvs(c=par[0], scale=par[1], loc=par[2]) # noqa: N806
elif self.Name == 'poisson':
- Random = stats.poisson.rvs(mu=par)
+ Random = stats.poisson.rvs(mu=par) # noqa: N806
elif self.Name == 'rayleigh':
- Random = stats.rayleigh.rvs(scale=par)
+ Random = stats.rayleigh.rvs(scale=par) # noqa: N806
elif self.Name == 'truncatednormal':
- Random = stats.truncnorm.rvs(
+ Random = stats.truncnorm.rvs( # noqa: N806
loc=par[0], scale=par[1], a=par[2], b=par[3]
)
elif self.Name == 'uniform':
- Random = stats.uniform.rvs(loc=par[0], scale=par[1])
+ Random = stats.uniform.rvs(loc=par[0], scale=par[1]) # noqa: N806
elif self.Name == 'weibull':
- Random = stats.weibull_min.rvs(c=par[1], scale=par[0])
+ Random = stats.weibull_min.rvs(c=par[1], scale=par[0]) # noqa: N806
return Random
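
Each of the four conditional methods above repeats the same elif ladder over self.Name, which is why every assignment carries an N806 tag and every method a C901 tag. A hypothetical table-driven alternative (a sketch assuming par is the already-computed conditional parameter list; not this module's code) would freeze a scipy distribution once per name and reuse it for cdf/ppf/pdf/rvs:

    from scipy import stats

    # Hypothetical dispatch table: name -> factory for a frozen scipy
    # distribution. Only a few entries are spelled out; the rest would
    # follow the same keyword mapping as the elif ladder above.
    _FREEZERS = {
        'normal': lambda par: stats.norm(loc=par[0], scale=par[1]),
        'lognormal': lambda par: stats.lognorm(s=par[0], scale=par[1]),
        'gumbel': lambda par: stats.gumbel_r(scale=par[0], loc=par[1]),
        'uniform': lambda par: stats.uniform(loc=par[0], scale=par[1]),
        'weibull': lambda par: stats.weibull_min(c=par[1], scale=par[0]),
    }

    def cond_cdf(name, x, par):
        # gevmin and negativebinomial would still need thin wrappers for
        # their sign flip and location shift, exactly as in the ladder above.
        return _FREEZERS[name.lower()](par).cdf(x)

    def cond_icdf(name, y, par):
        return _FREEZERS[name.lower()](par).ppf(y)
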
diff --git a/modules/performUQ/common/ERAClasses/ERADist.py b/modules/performUQ/common/ERAClasses/ERADist.py
index caaae7b5e..3b9a83d6f 100644
--- a/modules/performUQ/common/ERAClasses/ERADist.py
+++ b/modules/performUQ/common/ERAClasses/ERADist.py
@@ -1,4 +1,4 @@
-# import of modules
+# import of modules # noqa: INP001, D100
import warnings
import numpy as np
@@ -47,7 +47,7 @@
They can be defined either by their parameters, the first and second
moment or by data, given as a vector.
---------------------------------------------------------------------------
-"""
+""" # noqa: W291
class ERADist:
@@ -136,13 +136,13 @@ class ERADist:
Uniform: Obj = ERADist('uniform','DATA',[X])
Weibull: Obj = ERADist('weibull','DATA',[X])
- """
+ """ # noqa: D205, D400
# %%
- def __init__(self, name, opt, val=[0, 1], ID=False):
+ def __init__(self, name, opt, val=[0, 1], ID=False): # noqa: FBT002, B006, C901, N803, PLR0912, PLR0915
"""Constructor method, for more details have a look at the
class description.
- """
+ """ # noqa: D205, D401
self.Name = name.lower()
self.ID = ID
@@ -165,8 +165,8 @@ class description.
scale=self.Par['b'] - self.Par['a'],
)
else:
- raise RuntimeError(
- 'The Beta distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Beta distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'binomial':
@@ -174,8 +174,8 @@ class description.
self.Par = {'n': int(val[0]), 'p': val[1]}
self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p'])
else:
- raise RuntimeError(
- 'The Binomial distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Binomial distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'chisquare':
@@ -183,8 +183,8 @@ class description.
self.Par = {'k': np.around(val[0], 0)}
self.Dist = stats.chi2(df=self.Par['k'])
else:
- raise RuntimeError(
- 'The Chi-Squared distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Chi-Squared distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'exponential':
@@ -192,8 +192,8 @@ class description.
self.Par = {'lambda': val[0]}
self.Dist = stats.expon(scale=1 / self.Par['lambda'])
else:
- raise RuntimeError(
- 'The Exponential distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Exponential distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'frechet':
@@ -205,8 +205,8 @@ class description.
loc=self.Par['a_n'],
)
else:
- raise RuntimeError(
- 'The Frechet distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Frechet distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'gamma':
@@ -216,8 +216,8 @@ class description.
a=self.Par['k'], scale=1 / self.Par['lambda']
)
else:
- raise RuntimeError(
- 'The Gamma distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Gamma distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'geometric':
@@ -226,8 +226,8 @@ class description.
self.Par = {'p': val}
self.Dist = stats.geom(p=self.Par['p'])
else:
- raise RuntimeError(
- 'The Geometric distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Geometric distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'gev':
@@ -239,8 +239,8 @@ class description.
loc=self.Par['epsilon'],
)
else:
- raise RuntimeError(
- 'The Generalized Extreme Value gistribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Generalized Extreme Value distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'gevmin':
@@ -252,8 +252,8 @@ class description.
loc=-self.Par['epsilon'],
)
else:
- raise RuntimeError(
- 'The Generalized Extreme Value distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Generalized Extreme Value distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'gumbel':
@@ -263,8 +263,8 @@ class description.
scale=self.Par['a_n'], loc=self.Par['b_n']
)
else:
- raise RuntimeError(
- 'The Gumbel distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'gumbelmin':
@@ -274,8 +274,8 @@ class description.
scale=self.Par['a_n'], loc=self.Par['b_n']
)
else:
- raise RuntimeError(
- 'The Gumbel distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Gumbel distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'lognormal':
@@ -285,8 +285,8 @@ class description.
s=self.Par['sig_lnx'], scale=np.exp(self.Par['mu_lnx'])
)
else:
- raise RuntimeError(
- 'The Lognormal distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Lognormal distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'negativebinomial':
@@ -299,8 +299,8 @@ class description.
self.Par = {'k': val[0], 'p': val[1]}
self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p'])
else:
- raise RuntimeError(
- 'The Negative Binomial distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Negative Binomial distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'normal' or name.lower() == 'gaussian':
@@ -310,8 +310,8 @@ class description.
loc=self.Par['mu'], scale=self.Par['sigma']
)
else:
- raise RuntimeError(
- 'The Normal distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Normal distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'pareto':
@@ -323,8 +323,8 @@ class description.
loc=self.Par['x_m'],
)
else:
- raise RuntimeError(
- 'The Pareto distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Pareto distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'poisson':
@@ -334,17 +334,17 @@ class description.
self.Par = {'lambda': val[0]}
self.Dist = stats.poisson(mu=self.Par['lambda'])
else:
- raise RuntimeError(
- 'The Poisson distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Poisson distribution is not defined for your parameters.' # noqa: EM101
)
- if n == 2:
+ if n == 2: # noqa: PLR2004
if val[0] > 0 and val[1] > 0:
self.Par = {'v': val[0], 't': val[1]}
self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t'])
else:
- raise RuntimeError(
- 'The Poisson distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Poisson distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'rayleigh':
@@ -353,8 +353,8 @@ class description.
self.Par = {'alpha': alpha}
self.Dist = stats.rayleigh(scale=self.Par['alpha'])
else:
- raise RuntimeError(
- 'The Rayleigh distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Rayleigh distribution is not defined for your parameters.' # noqa: EM101
)
elif (name.lower() == 'standardnormal') or (
@@ -365,11 +365,11 @@ class description.
elif name.lower() == 'truncatednormal':
if val[2] >= val[3]:
- raise RuntimeError(
- 'The upper bound a must be larger than the lower bound b.'
+ raise RuntimeError( # noqa: TRY003
+ 'The upper bound b must be larger than the lower bound a.' # noqa: EM101
)
if val[1] < 0:
- raise RuntimeError('sigma must be larger than 0.')
+ raise RuntimeError('sigma must be larger than 0.') # noqa: EM101, TRY003
self.Par = {
'mu_n': val[0],
'sig_n': val[1],
@@ -390,8 +390,8 @@ class description.
scale=self.Par['upper'] - self.Par['lower'],
)
else:
- raise RuntimeError(
- 'The Uniform distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Uniform distribution is not defined for your parameters.' # noqa: EM101
)
elif name.lower() == 'weibull':
@@ -401,8 +401,8 @@ class description.
c=self.Par['k'], scale=self.Par['a_n']
)
else:
- raise RuntimeError(
- 'The Weibull distribution is not defined for your parameters.'
+ raise RuntimeError( # noqa: TRY003
+ 'The Weibull distribution is not defined for your parameters.' # noqa: EM101
)
else:
@@ -414,11 +414,11 @@ class description.
val = np.array(val, ndmin=1, dtype=float)
if val.size > 1 and val[1] < 0:
- raise RuntimeError('The standard deviation must be non-negative.')
+ raise RuntimeError('The standard deviation must be non-negative.') # noqa: EM101, TRY003
if name.lower() == 'beta':
if val[3] <= val[2]:
- raise RuntimeError('Please select an other support [a,b].')
+ raise RuntimeError('Please select another support [a,b].') # noqa: EM101, TRY003
r = (
((val[3] - val[0]) * (val[0] - val[2]) / val[1] ** 2 - 1)
* (val[0] - val[2])
@@ -427,7 +427,7 @@ class description.
s = r * (val[3] - val[0]) / (val[0] - val[2])
# Evaluate if distribution can be defined on the parameters
if r <= 0 and s <= 0:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
self.Par = {'r': r, 's': s, 'a': val[2], 'b': val[3]}
self.Dist = stats.beta(
a=self.Par['r'],
@@ -444,30 +444,30 @@ class description.
if n % 1 <= 10 ** (-4):
n = int(n)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
if p >= 0 and p <= 1 and n > 0:
self.Par = {'n': n, 'p': p}
self.Dist = stats.binom(n=self.Par['n'], p=self.Par['p'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'chisquare':
if val[0] > 0 and val[0] < np.inf and val[0] % 1 <= 10 ** (-4):
self.Par = {'k': np.around(val[0], 0)}
self.Dist = stats.chi2(df=self.Par['k'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'exponential':
try:
lam = 1 / val[0]
except ZeroDivisionError:
- raise RuntimeError('The first moment cannot be zero!')
+ raise RuntimeError('The first moment cannot be zero!') # noqa: B904, EM101, TRY003
if lam >= 0:
self.Par = {'lambda': lam}
self.Dist = stats.expon(scale=1 / self.Par['lambda'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'frechet':
par0 = 2.0001
@@ -487,8 +487,8 @@ def equation(par):
k = sol[0][0]
a_n = val[0] / special.gamma(1 - 1 / k)
else:
- raise RuntimeError(
- 'fsolve could not converge to a solution, therefore'
+ raise RuntimeError( # noqa: TRY003
+ 'fsolve could not converge to a solution, therefore ' # noqa: EM101
'the parameters of the Frechet distribution could not be determined.'
)
if a_n > 0 and k > 0:
@@ -499,7 +499,7 @@ def equation(par):
loc=self.Par['a_n'],
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'gamma':
# Solve system of equations for the parameters
@@ -512,7 +512,7 @@ def equation(par):
a=self.Par['k'], scale=1 / self.Par['lambda']
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'geometric':
# Solve Equation for the parameter based on the first moment
@@ -521,7 +521,7 @@ def equation(par):
self.Par = {'p': p}
self.Dist = stats.geom(p=self.Par['p'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'gev':
beta = val[2]
@@ -529,8 +529,8 @@ def equation(par):
# Solve two equations for the parameters of the distribution
alpha = val[1] * np.sqrt(6) / np.pi # scale parameter
epsilon = val[2] - np.euler_gamma * alpha # location parameter
- elif beta >= 0.5:
- raise RuntimeError('MOM can only be used for beta < 0.5 .')
+ elif beta >= 0.5: # noqa: PLR2004
+ raise RuntimeError('MOM can only be used for beta < 0.5.') # noqa: EM101, TRY003
else:
alpha = (
abs(beta)
@@ -554,8 +554,8 @@ def equation(par):
# Solve two equations for the parameters of the distribution
alpha = val[1] * np.sqrt(6) / np.pi # scale parameter
epsilon = val[2] + np.euler_gamma * alpha # location parameter
- elif beta >= 0.5:
- raise RuntimeError('MOM can only be used for beta < 0.5 .')
+ elif beta >= 0.5: # noqa: PLR2004
+ raise RuntimeError('MOM can only be used for beta < 0.5.') # noqa: EM101, TRY003
else:
alpha = (
abs(beta)
@@ -583,7 +583,7 @@ def equation(par):
scale=self.Par['a_n'], loc=self.Par['b_n']
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'gumbelmin':
# solve two equations for the parameters of the distribution
@@ -595,12 +595,12 @@ def equation(par):
scale=self.Par['a_n'], loc=self.Par['b_n']
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'lognormal':
if val[0] <= 0:
- raise RuntimeError(
- 'Please select other moments, the first moment must be greater than zero.'
+ raise RuntimeError( # noqa: TRY003
+ 'Please select other moments, the first moment must be greater than zero.' # noqa: EM101
)
# solve two equations for the parameters of the distribution
mu_lnx = np.log(val[0] ** 2 / np.sqrt(val[1] ** 2 + val[0] ** 2))
@@ -621,9 +621,9 @@ def equation(par):
self.Par = {'k': k, 'p': p}
self.Dist = stats.nbinom(n=self.Par['k'], p=self.Par['p'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif (name.lower() == 'normal') or (name.lower() == 'gaussian'):
self.Par = {'mu': val[0], 'sigma': val[1]}
@@ -640,7 +640,7 @@ def equation(par):
loc=self.Par['x_m'],
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'poisson':
n = len(val)
@@ -649,17 +649,17 @@ def equation(par):
self.Par = {'lambda': val[0]}
self.Dist = stats.poisson(mu=self.Par['lambda'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
- if n == 2:
+ if n == 2: # noqa: PLR2004
if val[0] > 0 and val[1] > 0:
v = val[0] / val[1]
if val[1] <= 0:
- raise RuntimeError('t must be positive.')
+ raise RuntimeError('t must be positive.') # noqa: EM101, TRY003
self.Par = {'v': v, 't': val[1]}
self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif name.lower() == 'rayleigh':
alpha = val[0] / np.sqrt(np.pi / 2)
@@ -667,7 +667,7 @@ def equation(par):
self.Par = {'alpha': alpha}
self.Dist = stats.rayleigh(scale=self.Par['alpha'])
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
elif (name.lower() == 'standardnormal') or (
name.lower() == 'standardgaussian'
@@ -677,16 +677,16 @@ def equation(par):
elif name.lower() == 'truncatednormal':
if val[2] >= val[3]:
- raise RuntimeError(
- 'The upper bound a must be larger than the lower bound b.'
+ raise RuntimeError( # noqa: TRY003
+ 'The upper bound b must be larger than the lower bound a.' # noqa: EM101
)
if val[0] <= val[2] or val[0] >= val[3]:
- raise RuntimeError(
- 'The mean of the distribution must be within the interval [a,b].'
+ raise RuntimeError( # noqa: TRY003
+ 'The mean of the distribution must be within the interval [a,b].' # noqa: EM101
)
def equation(par):
- f = lambda x: stats.norm.pdf(x, par[0], par[1]) / (
+ f = lambda x: stats.norm.pdf(x, par[0], par[1]) / ( # noqa: E731
stats.norm.cdf(val[3], par[0], par[1])
- stats.norm.cdf(val[2], par[0], par[1])
)
@@ -707,7 +707,7 @@ def equation(par):
- val[1]
)
eq = [expec_eq, std_eq]
- return eq
+ return eq # noqa: RET504
x0 = [val[0], val[1]]
sol = optimize.fsolve(equation, x0=x0, full_output=True)
@@ -727,7 +727,7 @@ def equation(par):
b=b_mod,
)
else:
- raise RuntimeError('fsolve did not converge.')
+ raise RuntimeError('fsolve did not converge.') # noqa: EM101, TRY003
elif name.lower() == 'uniform':
# compute parameters
@@ -756,8 +756,8 @@ def equation(par):
k = sol[0][0]
a_n = val[0] / special.gamma(1 + 1 / k)
else:
- raise RuntimeError(
- 'fsolve could not converge to a solution, therefore'
+ raise RuntimeError( # noqa: TRY003
+ 'fsolve could not converge to a solution, therefore ' # noqa: EM101
'the parameters of the Weibull distribution could not be determined.'
)
if a_n > 0 and k > 0:
@@ -766,7 +766,7 @@ def equation(par):
c=self.Par['k'], scale=self.Par['a_n']
)
else:
- raise RuntimeError('Please select other moments.')
+ raise RuntimeError('Please select other moments.') # noqa: EM101, TRY003
else:
raise RuntimeError("Distribution type '" + name + "' not available.")
@@ -776,7 +776,7 @@ def equation(par):
elif opt.upper() == 'DATA':
if name.lower() == 'beta':
if val[2] <= val[1]:
- raise RuntimeError('Please select a different support [a,b].')
+ raise RuntimeError('Please select a different support [a,b].') # noqa: EM101, TRY003
if min(val[0]) >= val[1] and max(val[0]) <= val[2]:
pars = stats.beta.fit(
val[0], floc=val[1], fscale=val[2] - val[1]
@@ -789,8 +789,8 @@ def equation(par):
scale=self.Par['b'] - self.Par['a'],
)
else:
- raise RuntimeError(
- 'The given samples must be in the support range [a,b].'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be in the support range [a,b].' # noqa: EM101
)
elif name.lower() == 'binomial':
@@ -798,13 +798,13 @@ def equation(par):
if val[1] % 1 <= 10 ** (-4) and val[1] > 0:
val[1] = int(val[1])
else:
- raise RuntimeError('n must be a positive integer.')
- X = np.array(val[0])
+ raise RuntimeError('n must be a positive integer.') # noqa: EM101, TRY003
+ X = np.array(val[0]) # noqa: N806
if all((X) % 1 <= 10 ** (-4)) and all(X >= 0) and all(val[1] >= X):
- X = np.around(X, 0)
+ X = np.around(X, 0) # noqa: N806
else:
- raise RuntimeError(
- 'The given samples must be integers in the range [0,n].'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be integers in the range [0,n].' # noqa: EM101
)
val[0] = np.mean(val[0]) / val[1]
self.Par = {'n': val[1], 'p': val[0]}
@@ -816,7 +816,7 @@ def equation(par):
self.Par = {'k': np.around(pars[0], 0)}
self.Dist = stats.chi2(df=self.Par['k'])
else:
- raise RuntimeError('The given samples must be non-negative.')
+ raise RuntimeError('The given samples must be non-negative.') # noqa: EM101, TRY003
elif name.lower() == 'exponential':
if min(val) >= 0:
@@ -824,11 +824,11 @@ def equation(par):
self.Par = {'lambda': 1 / pars[1]}
self.Dist = stats.expon(scale=1 / self.Par['lambda'])
else:
- raise RuntimeError('The given samples must be non-negative.')
+ raise RuntimeError('The given samples must be non-negative.') # noqa: EM101, TRY003
elif name.lower() == 'frechet':
if min(val) < 0:
- raise RuntimeError('The given samples must be non-negative.')
+ raise RuntimeError('The given samples must be non-negative.') # noqa: EM101, TRY003
def equation(par):
return -np.sum(
@@ -844,7 +844,7 @@ def equation(par):
x0 = np.array([par0, par1])
bnds = optimize.Bounds(lb=[0, 0], ub=[np.inf, np.inf])
sol = optimize.minimize(equation, x0, bounds=bnds)
- if sol.success == True:
+ if sol.success == True: # noqa: E712
self.Par = {'a_n': sol.x[0], 'k': sol.x[1]}
self.Dist = stats.genextreme(
c=-1 / self.Par['k'],
@@ -852,8 +852,8 @@ def equation(par):
loc=self.Par['a_n'],
)
else:
- raise RuntimeError(
- 'Maximum likelihood estimation did not converge.'
+ raise RuntimeError( # noqa: TRY003
+ 'Maximum likelihood estimation did not converge.' # noqa: EM101
)
elif name.lower() == 'gamma':
@@ -868,8 +868,8 @@ def equation(par):
self.Par = {'p': 1 / np.mean(val)}
self.Dist = stats.geom(p=self.Par['p'])
else:
- raise RuntimeError(
- 'The given samples must be integers larger than 0.'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be integers larger than 0.' # noqa: EM101
)
elif name.lower() == 'gev':
@@ -916,8 +916,8 @@ def equation(par):
p = np.mean(val) / (np.mean(val) + np.var(val))
k = np.mean(val) * p
if k == 0:
- raise RuntimeError(
- 'No suitable parameters can be estimated from the given data.'
+ raise RuntimeError( # noqa: TRY003
+ 'No suitable parameters can be estimated from the given data.' # noqa: EM101
)
k = round(
k, 0
@@ -946,7 +946,7 @@ def equation(par):
x0 = x_m
sol = optimize.minimize(equation, x0)
- if sol.success == True:
+ if sol.success == True: # noqa: E712
self.Par = {'x_m': x_m, 'alpha': float(sol.x)}
self.Dist = stats.genpareto(
c=1 / self.Par['alpha'],
@@ -954,33 +954,33 @@ def equation(par):
loc=self.Par['x_m'],
)
else:
- raise RuntimeError(
- 'Maximum likelihood estimation did not converge.'
+ raise RuntimeError( # noqa: TRY003
+ 'Maximum likelihood estimation did not converge.' # noqa: EM101
)
else:
- raise RuntimeError('The given data must be positive.')
+ raise RuntimeError('The given data must be positive.') # noqa: EM101, TRY003
elif name.lower() == 'poisson':
n = len(val)
- if n == 2:
- X = val[0]
+ if n == 2: # noqa: PLR2004
+ X = val[0] # noqa: N806
t = val[1]
if t <= 0:
- raise RuntimeError('t must be positive.')
+ raise RuntimeError('t must be positive.') # noqa: EM101, TRY003
if all(X >= 0) and all(X % 1 == 0):
v = np.mean(X) / t
self.Par = {'v': v, 't': t}
self.Dist = stats.poisson(mu=self.Par['v'] * self.Par['t'])
else:
- raise RuntimeError(
- 'The given samples must be non-negative integers.'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be non-negative integers.' # noqa: EM101
)
elif all(val >= 0) and all(val % 1 == 0):
self.Par = {'lambda': np.mean(val)}
self.Dist = stats.poisson(mu=self.Par['lambda'])
else:
- raise RuntimeError(
- 'The given samples must be non-negative integers.'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be non-negative integers.' # noqa: EM101
)
elif name.lower() == 'rayleigh':
@@ -989,14 +989,14 @@ def equation(par):
self.Dist = stats.rayleigh(scale=self.Par['alpha'])
elif name.lower() == 'truncatednormal':
- X = val[0]
+ X = val[0] # noqa: N806
if val[1] >= val[2]:
- raise RuntimeError(
- 'The upper bound a must be larger than the lower bound b.'
+ raise RuntimeError( # noqa: TRY003
+ 'The upper bound b must be larger than the lower bound a.' # noqa: EM101
)
if not (all(val[1] <= X) and all(val[2] >= X)):
- raise RuntimeError(
- 'The given samples must be in the range [a,b].'
+ raise RuntimeError( # noqa: TRY003
+ 'The given samples must be in the range [a,b].' # noqa: EM101
)
def equation(par):
@@ -1013,7 +1013,7 @@ def equation(par):
x0 = np.array([np.mean(X), np.std(X)])
bnds = optimize.Bounds(lb=[-np.inf, 0], ub=[np.inf, np.inf])
sol = optimize.minimize(equation, x0, bounds=bnds)
- if sol.success == True:
+ if sol.success == True: # noqa: E712
self.Par = {
'mu_n': float(sol.x[0]),
'sig_n': float(sol.x[1]),
@@ -1029,8 +1029,8 @@ def equation(par):
b=b_mod,
)
else:
- raise RuntimeError(
- 'Maximum likelihood estimation did not converge.'
+ raise RuntimeError( # noqa: TRY003
+ 'Maximum likelihood estimation did not converge.' # noqa: EM101
)
elif name.lower() == 'uniform':
@@ -1053,11 +1053,11 @@ def equation(par):
# %%
def mean(self):
- """Returns the mean of the distribution."""
+ """Returns the mean of the distribution.""" # noqa: D401
if self.Name == 'gevmin':
return -self.Dist.mean()
- elif self.Name == 'negativebinomial':
+ elif self.Name == 'negativebinomial': # noqa: RET505
return self.Dist.mean() + self.Par['k']
else:
@@ -1065,16 +1065,16 @@ def mean(self):
# %%
def std(self):
- """Returns the standard deviation of the distribution."""
+ """Returns the standard deviation of the distribution.""" # noqa: D401
return self.Dist.std()
# %%
def pdf(self, x):
- """Returns the PDF value."""
- if self.Name == 'binomial' or self.Name == 'geometric':
+ """Returns the PDF value.""" # noqa: D401
+ if self.Name == 'binomial' or self.Name == 'geometric': # noqa: PLR1714
return self.Dist.pmf(x)
- elif self.Name == 'gevmin':
+ elif self.Name == 'gevmin': # noqa: RET505
return self.Dist.pdf(-x)
elif self.Name == 'negativebinomial':
@@ -1088,11 +1088,11 @@ def pdf(self, x):
# %%
def cdf(self, x):
- """Returns the CDF value."""
+ """Returns the CDF value.""" # noqa: D401
if self.Name == 'gevmin':
return 1 - self.Dist.cdf(-x) # <-- this is not a proper cdf!
- elif self.Name == 'negativebinomial':
+ elif self.Name == 'negativebinomial': # noqa: RET505
return self.Dist.cdf(x - self.Par['k'])
else:
@@ -1102,25 +1102,25 @@ def cdf(self, x):
def random(self, size=None):
"""Generates random samples according to the distribution of the
object.
- """
+ """ # noqa: D205, D401
if self.Name == 'gevmin':
return self.Dist.rvs(size=size) * (-1)
- elif self.Name == 'negativebinomial':
+ elif self.Name == 'negativebinomial': # noqa: RET505
samples = self.Dist.rvs(size=size) + self.Par['k']
- return samples
+ return samples # noqa: RET504
else:
samples = self.Dist.rvs(size=size)
- return samples
+ return samples # noqa: RET504
# %%
def icdf(self, y):
- """Returns the value of the inverse CDF."""
+ """Returns the value of the inverse CDF.""" # noqa: D401
if self.Name == 'gevmin':
return -self.Dist.ppf(1 - y)
- elif self.Name == 'negativebinomial':
+ elif self.Name == 'negativebinomial': # noqa: RET505
return self.Dist.ppf(y) + self.Par['k']
else:
@@ -1135,7 +1135,7 @@ def gevfit_alt(y):
The function gevfit_alt evaluates the parameters of the generalized
extreme value distribution with the method of Probability Weighted
Moments (PWM) and Maximum Likelihood Estimation (MLE).
- """
+ """ # noqa: D205, D401
# compute PWM estimates
x01 = gevpwm(y)
@@ -1144,30 +1144,30 @@ def gevfit_alt(y):
x02 = stats.genextreme.fit(y, scale=x01[1], loc=x01[2])
x02 = np.array([-x02[0], x02[2], x02[1]])
# if alpha reasonable
- if x02[1] >= 1.0e-6:
+ if x02[1] >= 1.0e-6: # noqa: PLR2004
# set parameters
par = x02
if par[0] < -1:
par = x01
- warnings.warn(
+ warnings.warn( # noqa: B028
'The MLE estimate of the shape parameter of the GEV is not in the range where the MLE estimator is valid. PWM estimation is used.'
)
- if par[0] > 0.4:
- warnings.warn(
+ if par[0] > 0.4: # noqa: PLR2004
+ warnings.warn( # noqa: B028
'The shape parameter of the GEV is not in the range where PWM asymptotic results are valid.'
)
else:
# set parameters obtained by PWM
par = x01
- if par[0] > 0.4:
- warnings.warn(
+ if par[0] > 0.4: # noqa: PLR2004
+ warnings.warn( # noqa: B028
'The shape parameter of the GEV is not in the range where PWM asymptotic results are valid.'
)
else:
# set parameters obtained by PWM
par = x01
- if par[0] < -0.4:
- warnings.warn(
+ if par[0] < -0.4: # noqa: PLR2004
+ warnings.warn( # noqa: B028
'The shape parameter of the GEV is not in the range where PWM asymptotic results are valid.'
)
@@ -1182,7 +1182,7 @@ def gevpwm(y):
The function gevpwm evaluates the parameters of the generalized
extreme value distribution applying the method of Probability Weighted
Moments.
- """
+ """ # noqa: D205, D401
# compute PWM estimates
y2 = np.sort(y)
beta0 = np.mean(y)
@@ -1197,7 +1197,7 @@ def gevpwm(y):
c = (2 * beta1 - beta0) / (3 * beta2 - beta0) - np.log(2) / np.log(3)
par0 = -7.8590 * c - 2.9554 * c**2
- equation = lambda x: (3 * beta2 - beta0) / (2 * beta1 - beta0) - (1 - 3**x) / (
+ equation = lambda x: (3 * beta2 - beta0) / (2 * beta1 - beta0) - (1 - 3**x) / ( # noqa: E731
1 - 2**x
)
sol = optimize.fsolve(equation, x0=par0, full_output=True)
@@ -1213,7 +1213,7 @@ def gevpwm(y):
)
par[2] = beta0 - par[1] / par[0] * (special.gamma(1 - par[0]) - 1)
else:
- warnings.warn(
+ warnings.warn( # noqa: B028
'fsolve could not converge to a solution for the PWM estimate.'
)
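
gevfit_alt and gevpwm back the 'gev' DATA branch of the constructor: the PWM estimates provide a robust starting point and MLE refines it when the shape lies in the valid range. A hypothetical end-to-end check (assuming the module is importable as ERADist and that the fitted values land in Par under the keys used by the 'gev' PAR branch):

    from scipy import stats
    from ERADist import ERADist  # assumption: importable under this name

    # synthetic block-maxima-style sample with known parameters; note that
    # scipy's shape convention c is the negative of the shape used here
    # (an assumption consistent with the sign flips visible above)
    y = stats.genextreme.rvs(c=-0.1, scale=2.0, loc=10.0, size=500, random_state=42)

    gev = ERADist('gev', 'DATA', [y])  # internally: gevpwm start, MLE refinement
    print(gev.Par)                     # e.g. keys 'beta', 'alpha', 'epsilon'
    print(gev.mean(), gev.std())
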
diff --git a/modules/performUQ/common/ERAClasses/ERANataf.py b/modules/performUQ/common/ERAClasses/ERANataf.py
index 09f68987a..81f2a70d9 100644
--- a/modules/performUQ/common/ERAClasses/ERANataf.py
+++ b/modules/performUQ/common/ERAClasses/ERANataf.py
@@ -1,4 +1,4 @@
-# import of modules
+# import of modules # noqa: INP001, D100
import numpy as np
from scipy import optimize, stats
@@ -58,7 +58,7 @@
models with prescribed marginals and covariances.
Probabilistic Engineering Mechanics 1(2), 105-112
---------------------------------------------------------------------------
-"""
+""" # noqa: W291
# %%
@@ -78,12 +78,12 @@ class ERANataf:
correlation matrix, the input matrix must be symmetric, the matrix entries
on the diagonal must be equal to one, all other entries (correlation
coefficients) can have values between -1 and 1.
- """
+ """ # noqa: D205, D400
- def __init__(self, M, Correlation):
+ def __init__(self, M, Correlation): # noqa: C901, N803
"""Constructor method, for more details have a look at the
class description.
- """
+ """ # noqa: D205, D401
self.Marginals = np.array(M, ndmin=1)
self.Marginals = self.Marginals.ravel()
self.Rho_X = np.array(Correlation, ndmin=2)
@@ -95,8 +95,8 @@ class description.
np.isfinite(self.Marginals[i].mean())
and np.isfinite(self.Marginals[i].std())
):
- raise RuntimeError(
- 'The marginal distributions need to have '
+ raise RuntimeError( # noqa: TRY003
+ 'The marginal distributions need to have ' # noqa: EM101
'finite mean and variance'
)
@@ -104,18 +104,18 @@ class description.
try:
np.linalg.cholesky(self.Rho_X)
except np.linalg.LinAlgError:
- raise RuntimeError(
- 'The given correlation matrix is not positive definite'
+ raise RuntimeError( # noqa: B904, TRY003
+ 'The given correlation matrix is not positive definite ' # noqa: EM101
'--> Nataf transformation is not applicable.'
)
if not np.all(self.Rho_X - self.Rho_X.T == 0):
- raise RuntimeError(
- 'The given correlation matrix is not symmetric '
+ raise RuntimeError( # noqa: TRY003
+ 'The given correlation matrix is not symmetric ' # noqa: EM101
'--> Nataf transformation is not applicable.'
)
if not np.all(np.diag(self.Rho_X) == 1):
- raise RuntimeError(
- 'Not all diagonal entries of the given correlation matrix are equal to one '
+ raise RuntimeError( # noqa: TRY003
+ 'Not all diagonal entries of the given correlation matrix are equal to one ' # noqa: EM101
'--> Nataf transformation is not applicable.'
)
@@ -150,7 +150,7 @@ class description.
if self.Rho_X[i, j] == 0:
continue
- elif (
+ elif ( # noqa: RET507
(
(self.Marginals[i].Name == 'standardnormal')
and (self.Marginals[j].Name == 'standardnormal')
@@ -167,7 +167,7 @@ class description.
elif (self.Marginals[i].Name == 'normal') and (
self.Marginals[j].Name == 'lognormal'
):
- Vj = self.Marginals[j].std() / self.Marginals[j].mean()
+ Vj = self.Marginals[j].std() / self.Marginals[j].mean() # noqa: N806
self.Rho_Z[i, j] = (
self.Rho_X[i, j] * Vj / np.sqrt(np.log(1 + Vj**2))
)
@@ -177,7 +177,7 @@ class description.
elif (self.Marginals[i].Name == 'lognormal') and (
self.Marginals[j].Name == 'normal'
):
- Vi = self.Marginals[i].std() / self.Marginals[i].mean()
+ Vi = self.Marginals[i].std() / self.Marginals[i].mean() # noqa: N806
self.Rho_Z[i, j] = (
self.Rho_X[i, j] * Vi / np.sqrt(np.log(1 + Vi**2))
)
@@ -187,8 +187,8 @@ class description.
elif (self.Marginals[i].Name == 'lognormal') and (
self.Marginals[j].Name == 'lognormal'
):
- Vi = self.Marginals[i].std() / self.Marginals[i].mean()
- Vj = self.Marginals[j].std() / self.Marginals[j].mean()
+ Vi = self.Marginals[i].std() / self.Marginals[i].mean() # noqa: N806
+ Vj = self.Marginals[j].std() / self.Marginals[j].mean() # noqa: N806
self.Rho_Z[i, j] = np.log(
1 + self.Rho_X[i, j] * Vi * Vj
) / np.sqrt(np.log(1 + Vi**2) * np.log(1 + Vj**2))
@@ -208,8 +208,8 @@ class description.
def fun(rho0):
return (
- coef * self.bivariateNormalPdf(xi, eta, rho0)
- ).sum() - self.Rho_X[i, j]
+ coef * self.bivariateNormalPdf(xi, eta, rho0) # noqa: B023
+ ).sum() - self.Rho_X[i, j] # noqa: B023
x0, r = optimize.brentq(
f=fun,
@@ -235,7 +235,7 @@ def fun(rho0):
self.Rho_Z[i, j] = sol[0]
self.Rho_Z[j, i] = self.Rho_Z[i, j]
else:
- for i in range(10):
+ for _ in range(10): # retry with random starts; must not clobber the outer matrix index i
init = 2 * np.random.rand() - 1
sol = optimize.fsolve(
func=fun, x0=init, full_output=True
@@ -246,8 +246,8 @@ def fun(rho0):
self.Rho_Z[i, j] = sol[0]
self.Rho_Z[j, i] = self.Rho_Z[i, j]
else:
- raise RuntimeError(
- 'brentq and fsolve coul'
+ raise RuntimeError( # noqa: TRY003
+ 'brentq and fsolve coul' # noqa: EM101
'd not converge to a '
'solution of the Nataf '
'integral equation'
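
The fun(rho0) closure above is a Gauss-quadrature approximation of the Nataf integral equation: find the Gaussian-copula correlation rho0 whose implied physical-space correlation equals Rho_X[i, j]. A standalone sketch of the same root-finding problem (illustrative only; it assumes frozen scipy marginals with finite moments, not this class's internals):

    import numpy as np
    from scipy import optimize, stats

    def nataf_rho_z(marg_i, marg_j, rho_x, n_nodes=32):
        # probabilists' Gauss-Hermite rule: integrates against exp(-z**2/2)
        z, w = np.polynomial.hermite_e.hermegauss(n_nodes)
        w = w / np.sqrt(2.0 * np.pi)  # rescale to the standard normal density
        mi, si = marg_i.mean(), marg_i.std()
        mj, sj = marg_j.mean(), marg_j.std()

        def implied_minus_target(rho0):
            # substitution z_j = rho0*z_i + sqrt(1-rho0**2)*eta turns the
            # bivariate-normal integral into two independent 1-D quadratures
            zi = z[:, None]
            zj = rho0 * zi + np.sqrt(1.0 - rho0**2) * z[None, :]
            # clip guards against the normal CDF rounding to exactly 0 or 1
            # at the outermost quadrature nodes
            ui = np.clip(stats.norm.cdf(zi), 1e-14, 1.0 - 1e-14)
            uj = np.clip(stats.norm.cdf(zj), 1e-14, 1.0 - 1e-14)
            xi = (marg_i.ppf(ui) - mi) / si
            xj = (marg_j.ppf(uj) - mj) / sj
            return np.sum(w[:, None] * w[None, :] * xi * xj) - rho_x

        return optimize.brentq(implied_minus_target, -0.999, 0.999)

    # two lognormal marginals, target correlation 0.6: rho_Z comes out
    # slightly larger in magnitude, matching the closed form used above
    ln = stats.lognorm(s=0.5, scale=1.0)
    print(nataf_rho_z(ln, ln, 0.6))
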
@@ -255,8 +255,8 @@ def fun(rho0):
try:
self.A = np.linalg.cholesky(self.Rho_Z)
except np.linalg.LinAlgError:
- raise RuntimeError(
- 'Transformed correlation matrix is not positive'
+ raise RuntimeError( # noqa: B904, TRY003
+ 'Transformed correlation matrix is not positive' # noqa: EM101
' definite --> Nataf transformation is not '
'applicable.'
)
@@ -271,7 +271,7 @@ def fun(rho0):
Jacobian of this Transformation if it is needed.
"""
- def X2U(self, X, Jacobian=False):
+ def X2U(self, X, Jacobian=False): # noqa: FBT002, N802, N803
"""Carries out the transformation from physical space X to
standard normal space U.
X must be a [n,d]-shaped array (n = number of data points,
@@ -279,9 +279,9 @@ def X2U(self, X, Jacobian=False):
The Jacobian of the transformation of the first given data
point is only given as an output in case that the input
argument Jacobian=True.
- """
+ """ # noqa: D205
n_dim = len(self.Marginals)
- X = np.array(X, ndmin=2)
+ X = np.array(X, ndmin=2) # noqa: N806
# check if all marginal distributions are continuous
for i in range(n_dim):
@@ -291,39 +291,39 @@ def X2U(self, X, Jacobian=False):
'negativebinomial',
'poisson',
]:
- raise RuntimeError(
- 'At least one of the marginal distributions is a discrete distribution,'
+ raise RuntimeError( # noqa: TRY003
+ 'At least one of the marginal distributions is a discrete distribution, ' # noqa: EM101
'the transformation X2U is therefore not possible.'
)
# check of the dimensions of input X
- if X.ndim > 2:
- raise RuntimeError('X must have not more than two dimensions. ')
+ if X.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('X must have no more than two dimensions.') # noqa: EM101, TRY003
if np.shape(X)[1] == 1 and n_dim != 1:
# in case only one point X is given, it can be defined either as a row or a column vector
- X = X.T
+ X = X.T # noqa: N806
if np.shape(X)[1] != n_dim:
- raise RuntimeError(
- 'X must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'X must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- Z = np.zeros(np.flip(X.shape))
+ Z = np.zeros(np.flip(X.shape)) # noqa: N806
for i in range(n_dim):
Z[i, :] = stats.norm.ppf(self.Marginals[i].cdf(X[:, i]))
- U = np.linalg.solve(self.A, Z.squeeze()).T
+ U = np.linalg.solve(self.A, Z.squeeze()).T # noqa: N806
if Jacobian:
diag = np.zeros([n_dim, n_dim])
for i in range(n_dim):
diag[i, i] = self.Marginals[i].pdf(X[0, i]) / stats.norm.pdf(Z[i, 0])
- Jac = np.linalg.solve(self.A, diag)
+ Jac = np.linalg.solve(self.A, diag) # noqa: N806
return np.squeeze(U), Jac
- else:
+ else: # noqa: RET505
return np.squeeze(U)
# %%
- def U2X(self, U, Jacobian=False):
+ def U2X(self, U, Jacobian=False): # noqa: FBT002, N802, N803
"""Carries out the transformation from standard normal space U
to physical space X.
U must be a [n,d]-shaped array (n = number of data points,
@@ -331,26 +331,26 @@ def U2X(self, U, Jacobian=False):
The Jacobian of the transformation of the first given data
point is only given as an output in case that the input
argument Jacobian=True.
- """
+ """ # noqa: D205
n_dim = len(self.Marginals)
- U = np.array(U, ndmin=2)
+ U = np.array(U, ndmin=2) # noqa: N806
# check of the dimensions of input U
- if U.ndim > 2:
- raise RuntimeError('U must have not more than two dimensions. ')
+ if U.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('U must have no more than two dimensions.') # noqa: EM101, TRY003
if np.shape(U)[1] == 1 and n_dim != 1:
# in case only one point U is given, it can be defined either as a row or a column vector
- U = U.T
+ U = U.T # noqa: N806
if np.shape(U)[1] != n_dim:
- raise RuntimeError(
- 'U must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'U must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- else:
- U = U.T
- Z = self.A @ U
+ else: # noqa: RET506
+ U = U.T # noqa: N806
+ Z = self.A @ U # noqa: N806
- X = np.zeros(np.flip(U.shape))
+ X = np.zeros(np.flip(U.shape)) # noqa: N806
for i in range(n_dim):
X[:, i] = self.Marginals[i].icdf(stats.norm.cdf(Z[i, :]))
@@ -358,20 +358,20 @@ def U2X(self, U, Jacobian=False):
diag = np.zeros([n_dim, n_dim])
for i in range(n_dim):
diag[i, i] = stats.norm.pdf(Z[i, 0]) / self.Marginals[i].pdf(X[0, i])
- Jac = np.dot(diag, self.A)
+ Jac = np.dot(diag, self.A) # noqa: N806
return np.squeeze(X), Jac
- else:
+ else: # noqa: RET505
return np.squeeze(X)
# %%
def random(self, n=1):
"""Creates n samples of the joint distribution.
Every row in the output array corresponds to one sample.
- """
+ """ # noqa: D205, D401
n = int(n)
n_dim = np.size(self.Marginals)
- U = np.random.randn(n_dim, n)
- Z = np.dot(self.A, U)
+ U = np.random.randn(n_dim, n) # noqa: N806
+ Z = np.dot(self.A, U) # noqa: N806
jr = np.zeros([n, n_dim])
for i in range(n_dim):
jr[:, i] = self.Marginals[i].icdf(stats.norm.cdf(Z[i, :]))
@@ -379,13 +379,13 @@ def random(self, n=1):
return np.squeeze(jr)
# %%
- def pdf(self, X):
+ def pdf(self, X): # noqa: C901, N803
"""Computes the joint PDF.
X must be a [n,d]-shaped array (n = number of data points,
d = dimensions).
- """
+ """ # noqa: D205, D401
n_dim = len(self.Marginals)
- X = np.array(X, ndmin=2)
+ X = np.array(X, ndmin=2) # noqa: N806
# check if all marginal distributions are continuous
for i in range(n_dim):
@@ -395,25 +395,25 @@ def pdf(self, X):
'negativebinomial',
'poisson',
]:
- raise RuntimeError(
- 'At least one of the marginal distributions is a discrete distribution,'
+ raise RuntimeError( # noqa: TRY003
+ 'At least one of the marginal distributions is a discrete distribution, ' # noqa: EM101
'the transformation X2U is therefore not possible.'
)
# check of the dimensions of input X
- if X.ndim > 2:
+ raise RuntimeError('X must have no more than two dimensions.') # noqa: EM101, TRY003
+ if X.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('X must have not more than two dimensions.') # noqa: EM101, TRY003
if np.shape(X)[1] == 1 and n_dim != 1:
# in case only one point X is given, it can be defined either as a row or a column vector
- X = X.T
+ X = X.T # noqa: N806
if np.shape(X)[1] != n_dim:
- raise RuntimeError(
- 'X must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'X must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- n_X = np.shape(X)[0]
- U = np.zeros([n_X, n_dim])
+ n_X = np.shape(X)[0] # noqa: N806
+ U = np.zeros([n_X, n_dim]) # noqa: N806
phi = np.zeros([n_dim, n_X])
f = np.zeros([n_dim, n_X])
mu = np.zeros(n_dim)
@@ -428,7 +428,7 @@ def pdf(self, X):
jointpdf[i] = (
np.prod(f[:, i]) / (np.prod(phi[:, i]) + realmin)
) * phi_n[i]
- except IndexError:
+ except IndexError: # noqa: PERF203
# In case of n=1, phi_n is a scalar.
jointpdf[i] = (
np.prod(f[:, i]) / (np.prod(phi[:, i]) + realmin)
@@ -438,11 +438,11 @@ def pdf(self, X):
if np.size(jointpdf) == 1:
return jointpdf[0]
- else:
+ else: # noqa: RET505
return jointpdf
# %%
- def cdf(self, X):
+ def cdf(self, X): # noqa: N803
"""Computes the joint CDF.
X must be a [n,d]-shaped array (n = number of data points,
d = dimensions).
@@ -450,23 +450,23 @@ def cdf(self, X):
In scipy the multivariate normal cdf is computed by Monte Carlo
sampling, the output of this method is therefore also a
stochastic quantity.
- """
+ """ # noqa: D205, D401
n_dim = len(self.Marginals)
- X = np.array(X, ndmin=2)
+ X = np.array(X, ndmin=2) # noqa: N806
# check of the dimensions of input X
- if X.ndim > 2:
- raise RuntimeError('X must have not more than two dimensions. ')
+ if X.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('X must have not more than two dimensions. ') # noqa: EM101, TRY003
if np.shape(X)[1] == 1 and n_dim != 1:
# in case only one point X is given, it can be defined either as a row or a column vector
- X = X.T
+ X = X.T # noqa: N806
if np.shape(X)[1] != n_dim:
- raise RuntimeError(
- 'X must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'X must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- n_X = np.shape(X)[0]
- U = np.zeros([n_X, n_dim])
+ n_X = np.shape(X)[0] # noqa: N806
+ U = np.zeros([n_X, n_dim]) # noqa: N806
for i in range(n_dim):
U[:, i] = stats.norm.ppf(self.Marginals[i].cdf(X[:, i]))
mu = np.zeros(n_dim)
@@ -474,11 +474,11 @@ def cdf(self, X):
U, mean=mu, cov=np.matrix(self.Rho_Z)
)
- return jointcdf
+ return jointcdf # noqa: RET504
# %%
@staticmethod
- def bivariateNormalPdf(x1, x2, rho):
+ def bivariateNormalPdf(x1, x2, rho): # noqa: N802, D102
return (
1
/ (2 * np.pi * np.sqrt(1 - rho**2))
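
Taken together, a hypothetical round trip through the transformation (assuming ERADist and ERANataf import like this, and that the lognormal 'PAR' values are ordered [mu_lnx, sig_lnx] as the constructor suggests):

    import numpy as np
    from ERADist import ERADist    # assumption: importable under these names
    from ERANataf import ERANataf

    M = [ERADist('normal', 'PAR', [0.0, 1.0]),
         ERADist('lognormal', 'PAR', [0.5, 0.3])]  # assumed order: mu_lnx, sig_lnx
    Rho_X = np.array([[1.0, 0.6],
                      [0.6, 1.0]])

    joint = ERANataf(M, Rho_X)
    X = joint.random(5)        # five samples in physical space
    U = joint.X2U(X)           # map to independent standard normals
    X_back = joint.U2X(U)      # and back
    print(np.allclose(X, X_back))
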
diff --git a/modules/performUQ/common/ERAClasses/ERARosen.py b/modules/performUQ/common/ERAClasses/ERARosen.py
index 1304fe379..7162da559 100644
--- a/modules/performUQ/common/ERAClasses/ERARosen.py
+++ b/modules/performUQ/common/ERAClasses/ERARosen.py
@@ -1,4 +1,4 @@
-# import of modules
+# import of modules # noqa: INP001, D100
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
@@ -59,7 +59,7 @@
3. Documentation of the ERA Distribution Classes
---------------------------------------------------------------------------
-"""
+""" # noqa: W291
# %%
@@ -83,20 +83,20 @@ class ERARosen:
distributions the order of the indices within one of the arrays
corresponds to the order of the variables of the respective function
handle of the respective ERACond object.
- """
+ """ # noqa: D205, D400
def __init__(self, dist, depend):
"""Constructor method, for more details have a look at the
class description.
- """
+ """ # noqa: D205, D401
self.Dist = dist
self.Parents = depend
n_dist = len(dist)
n_dist_dep = len(depend)
if n_dist != n_dist_dep:
- raise RuntimeError(
- 'The number of distributions according to the inputs'
+ raise RuntimeError( # noqa: TRY003
+ 'The number of distributions according to the inputs' # noqa: EM101
" dist and depend doesn't match."
)
@@ -105,8 +105,8 @@ class description.
if isinstance(dist[i], ERACond):
n_parents[i] = dist[i].Param.__code__.co_argcount
elif not isinstance(dist[i], ERADist):
- raise RuntimeError(
- 'The objects in dist must be either ERADist or ERACond objects.'
+ raise RuntimeError( # noqa: TRY003, TRY004
+ 'The objects in dist must be either ERADist or ERACond objects.' # noqa: EM101
)
# build adjacency matrix
@@ -115,18 +115,18 @@ class description.
adj_mat[i, depend[i]] = 1
# check if obtained network represents a directed acyclical graph
adj_prod = np.identity(n_dist)
- for i in range(n_dist + 1):
+ for i in range(n_dist + 1): # noqa: B007
adj_prod = np.matmul(adj_prod, adj_mat)
if sum(np.diag(adj_prod)) != 0:
- raise RuntimeError(
- 'The graph defining the dependence between the different '
+ raise RuntimeError( # noqa: TRY003
+ 'The graph defining the dependence between the different ' # noqa: EM101
'distributions must be directed and acyclical.'
)
self.Adjacency = np.matrix(adj_mat)
# sort distributions according to dependencies
- layers = list()
+ layers = list() # noqa: C408
rem_dist = np.arange(0, n_dist)
while len(rem_dist) > 0:
n_dep_rem = np.sum(adj_mat, 1)
@@ -141,13 +141,13 @@ class description.
self.Order = [layers[0], np.concatenate(layers[1:])]
self.Layers = layers
else:
- raise RuntimeError(
- 'The defined joint distribution consists only of independent distributions.'
+ raise RuntimeError( # noqa: TRY003
+ 'The defined joint distribution consists only of independent distributions. ' # noqa: EM101
'This type of joint distribution is not supported by ERARosen.'
)
# %%
- def X2U(self, X, error=True):
+ def X2U(self, X, error=True): # noqa: FBT002, N802, N803
"""Carries out the transformation from physical space X to
standard normal space U.
X must be a [n,d]-shaped array (n = number of data points,
@@ -155,9 +155,9 @@ def X2U(self, X, error=True):
If no error message should be given in case of the detection
of an improper distribution, give error=False as second input.
The output for the improper data points is then given as nan.
- """
+ """ # noqa: D205
n_dim = len(self.Dist)
- X = np.array(X, ndmin=2, dtype=float)
+ X = np.array(X, ndmin=2, dtype=float) # noqa: N806
# check if all marginal and conditional distributions are continuous
for i in range(n_dim):
@@ -167,25 +167,25 @@ def X2U(self, X, error=True):
'negativebinomial',
'poisson',
]:
- raise RuntimeError(
- 'At least one of the marginal distributions or conditional distributions '
+ raise RuntimeError( # noqa: TRY003
+ 'At least one of the marginal distributions or conditional distributions ' # noqa: EM101
'is a discrete distribution, the transformation X2U is therefore not possible.'
)
# check of the dimensions of input X
- if X.ndim > 2:
- raise RuntimeError('X must have not more than two dimensions. ')
+ if X.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('X must have no more than two dimensions.') # noqa: EM101, TRY003
if np.shape(X)[1] == 1 and n_dim != 1:
# in case only one point X is given, it can be defined either as a row or a column vector
- X = X.T
+ X = X.T # noqa: N806
if np.shape(X)[1] != n_dim:
- raise RuntimeError(
- 'X must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'X must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- n_X = np.shape(X)[0]
- U = np.zeros([n_X, n_dim])
+ n_X = np.shape(X)[0] # noqa: N806
+ U = np.zeros([n_X, n_dim]) # noqa: N806
for i in self.Order[0]:
U[:, i] = stats.norm.ppf(self.Dist[i].cdf(X[:, i]))
@@ -200,14 +200,14 @@ def X2U(self, X, error=True):
if error:
if not all(np.logical_not(lin_ind)):
- raise RuntimeError('Invalid joint distribution was created.')
+ raise RuntimeError('Invalid joint distribution was created.') # noqa: EM101, TRY003
else:
U[lin_ind, :] = np.nan
return np.squeeze(U)
# %%
- def U2X(self, U, error=True):
+ def U2X(self, U, error=True): # noqa: FBT002, N802, N803
"""Carries out the transformation from standard normal space U
to physical space X .
U must be a [n,d]-shaped array (n = number of data points,
@@ -215,25 +215,25 @@ def U2X(self, U, error=True):
If no error message should be given in case of the detection
of an improper distribution, give error=False as second input.
The output for the improper data points is then given as nan.
- """
+ """ # noqa: D205
n_dim = len(self.Dist)
- U = np.array(U, ndmin=2, dtype=float)
+ U = np.array(U, ndmin=2, dtype=float) # noqa: N806
# check of the dimensions of input U
- if U.ndim > 2:
- raise RuntimeError('U must have not more than two dimensions. ')
+ if U.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('U must have not more than two dimensions. ') # noqa: EM101, TRY003
if np.shape(U)[1] == 1 and n_dim != 1:
# in case only one point U is given, it can be defined either as a row or a column vector
- U = U.T
+ U = U.T # noqa: N806
if np.shape(U)[1] != n_dim:
- raise RuntimeError(
- 'U must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'U must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- n_U = np.shape(U)[0]
- X = np.zeros([n_U, n_dim])
- CDF_values = stats.norm.cdf(U)
+ n_U = np.shape(U)[0] # noqa: N806
+ X = np.zeros([n_U, n_dim]) # noqa: N806
+ CDF_values = stats.norm.cdf(U) # noqa: N806
for i in self.Order[0]:
X[:, i] = self.Dist[i].icdf(CDF_values[:, i])
@@ -246,37 +246,37 @@ def U2X(self, U, error=True):
if error:
if not np.all(np.logical_not(lin_ind)):
- raise RuntimeError('Invalid joint distribution was created.')
+ raise RuntimeError('Invalid joint distribution was created.') # noqa: EM101, TRY003
else:
X[lin_ind, :] = np.nan
return np.squeeze(X)
# %%
- def pdf(self, X, error=True):
+ def pdf(self, X, error=True): # noqa: FBT002, N803
"""Computes the joint PDF.
X must be a [n,d]-shaped array (n = number of data points,
d = dimensions).
If no error message should be given in case of the detection
of an improper distribution, give error=False as second input.
The output for the improper data points is then given as nan.
- """
+ """ # noqa: D205, D401
n_dim = len(self.Dist)
- X = np.array(X, ndmin=2, dtype=float)
+ X = np.array(X, ndmin=2, dtype=float) # noqa: N806
# check of the dimensions of input X
- if X.ndim > 2:
- raise RuntimeError('X must have not more than two dimensions. ')
+ if X.ndim > 2: # noqa: PLR2004
+ raise RuntimeError('X must have no more than two dimensions.') # noqa: EM101, TRY003
if np.shape(X)[1] == 1 and n_dim != 1:
# in case only one point X is given, it can be defined either as a row or a column vector
- X = X.T
+ X = X.T # noqa: N806
if np.shape(X)[1] != n_dim:
- raise RuntimeError(
- 'X must be an array of size [n,d], where d is the'
+ raise RuntimeError( # noqa: TRY003
+ 'X must be an array of size [n,d], where d is the' # noqa: EM101
' number of dimensions of the joint distribution.'
)
- n_X = np.shape(X)[0]
+ n_X = np.shape(X)[0] # noqa: N806
pdf_values = np.zeros([n_X, n_dim])
for i in self.Order[0]:
@@ -290,20 +290,20 @@ def pdf(self, X, error=True):
if error:
if not np.all(np.logical_not(nan_ind)):
- raise RuntimeError('Invalid joint distribution was created.')
+ raise RuntimeError('Invalid joint distribution was created.') # noqa: EM101, TRY003
if np.size(jointpdf) == 1:
return jointpdf[0]
- else:
+ else: # noqa: RET505
return jointpdf
# %%
def random(self, n=1):
"""Creates n samples of the joint distribution.
Every row in the output array corresponds to one sample.
- """
+ """ # noqa: D205, D401
n_dim = len(self.Dist)
- X = np.zeros([n, n_dim])
+ X = np.zeros([n, n_dim]) # noqa: N806
for i in self.Order[0]:
X[:, i] = self.Dist[i].random(n)
@@ -311,13 +311,13 @@ def random(self, n=1):
for i in self.Order[1]:
try:
X[:, i] = self.Dist[i].condRandom(X[:, self.Parents[i]])
- except ValueError:
- raise RuntimeError('Invalid joint distribution was created.')
+ except ValueError: # noqa: PERF203
+ raise RuntimeError('Invalid joint distribution was created.') # noqa: B904, EM101, TRY003
return np.squeeze(X)
# %%
- def plotGraph(self, opt=False):
+ def plotGraph(self, opt=False): # noqa: FBT002, C901, N802
"""Plots the Bayesian network which defines the dependency
between the different distributions.
If opt is given as 'numbering' the nodes are named according
@@ -326,11 +326,11 @@ def plotGraph(self, opt=False):
distribution, the distribution is also named according to its
position in dist, otherwise the property ID is taken as the
name of the distribution.
- """
+ """ # noqa: D205, D401
n_layer = len(self.Layers)
vert = np.flip(np.linspace(0, 1, n_layer))
- pos_n = dict()
- pos_l = dict()
+ pos_n = dict() # noqa: C408
+ pos_l = dict() # noqa: C408
for i in range(n_layer):
cur_l = self.Layers[i]
n_cur = len(cur_l)
@@ -340,7 +340,7 @@ def plotGraph(self, opt=False):
pos_l[cur_l[j]] = (horiz[j + 1] + 0.06, vert[i])
n_dim = len(self.Dist)
- labels = dict()
+ labels = dict() # noqa: C408
if not opt:
for i in range(n_dim):
if self.Dist[i].ID:
@@ -351,10 +351,10 @@ def plotGraph(self, opt=False):
for i in range(n_dim):
labels[i] = '#' + str(i)
else:
- raise RuntimeError("opt must be given as 'numbering'.")
+ raise RuntimeError("opt must be given as 'numbering'.") # noqa: EM101, TRY003
- G_Adj = nx.from_numpy_matrix(self.Adjacency)
- G = nx.DiGraph()
+ G_Adj = nx.from_numpy_matrix(self.Adjacency) # noqa: N806
+ G = nx.DiGraph() # noqa: N806
for i in range(1, n_layer):
cur_l = self.Layers[i]
n_cur = len(cur_l)
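A caveat on the G_Adj line kept above: nx.from_numpy_matrix was deprecated in NetworkX 2.x and removed in 3.0, so plotGraph only runs against an older NetworkX. A sketch of the drop-in replacement (not part of this patch; the adjacency matrix is illustrative):

import networkx as nx
import numpy as np

adjacency = np.array([[0, 1, 1],
                      [0, 0, 0],
                      [0, 0, 0]])  # illustrative parent -> child matrix
G_adj = nx.from_numpy_array(adjacency)                       # undirected, as G_Adj above
G = nx.from_numpy_array(adjacency, create_using=nx.DiGraph)  # directed variant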
diff --git a/modules/performUQ/common/parallel_runner_mpi4py.py b/modules/performUQ/common/parallel_runner_mpi4py.py
index 01e69946d..bf8de20bc 100644
--- a/modules/performUQ/common/parallel_runner_mpi4py.py
+++ b/modules/performUQ/common/parallel_runner_mpi4py.py
@@ -1,27 +1,27 @@
-from mpi4py import MPI
+from mpi4py import MPI # noqa: INP001, D100
from mpi4py.futures import MPIPoolExecutor
-class ParallelRunnerMPI4PY:
+class ParallelRunnerMPI4PY: # noqa: D101
def __init__(self, run_type: str = 'runningRemote') -> None:
self.run_type = run_type
self.comm = MPI.COMM_WORLD
self.num_processors = self.get_num_processors()
self.pool = self.get_pool()
- def get_num_processors(self) -> int:
+ def get_num_processors(self) -> int: # noqa: D102
num_processors = self.comm.Get_size()
if num_processors is None:
num_processors = 1
if num_processors < 1:
- raise ValueError(
- 'Number of processes must be at least 1. Got {num_processors}'
+ raise ValueError( # noqa: TRY003
+ f'Number of processes must be at least 1. Got {num_processors}' # noqa: EM102
)
return num_processors
- def get_pool(self) -> MPIPoolExecutor:
+ def get_pool(self) -> MPIPoolExecutor: # noqa: D102
self.pool = MPIPoolExecutor(max_workers=self.num_processors)
return self.pool
- def close_pool(self) -> None:
+ def close_pool(self) -> None: # noqa: D102
self.pool.shutdown()
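A hedged usage sketch for this runner (not part of the patch): the pool is a standard concurrent.futures-style executor, and the script must be launched under MPI for workers to exist, e.g. mpiexec -n 4 python -m mpi4py.futures my_script.py.

from parallel_runner_mpi4py import ParallelRunnerMPI4PY

runner = ParallelRunnerMPI4PY()
# pow is picklable, so it can be shipped to MPI workers; lambdas are not.
squares = list(runner.pool.map(pow, range(8), [2] * 8))  # [0, 1, 4, ..., 49]
runner.close_pool()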
diff --git a/modules/performUQ/common/quoFEM_RV_models.py b/modules/performUQ/common/quoFEM_RV_models.py
index 7e21a396e..689c4ca8c 100644
--- a/modules/performUQ/common/quoFEM_RV_models.py
+++ b/modules/performUQ/common/quoFEM_RV_models.py
@@ -1,4 +1,4 @@
-import typing
+import typing # noqa: INP001, D100
from typing import Any
import numpy as np
@@ -21,81 +21,81 @@
_supported_variable_classes = typing.Literal['Uncertain', 'Design', 'Uniform', 'NA']
-def _get_ERADistObjectName(name_from_quoFEM: str) -> str:
- _ERADistNames = {}
+def _get_ERADistObjectName(name_from_quoFEM: str) -> str: # noqa: N802, N803
+ _ERADistNames = {} # noqa: N806
_ERADistNames['ChiSquared'] = 'chisquare'
try:
nm = _ERADistNames[name_from_quoFEM]
- except:
+ except: # noqa: E722
nm = name_from_quoFEM.lower()
return nm
-def _get_ERADistOpt(input_type_from_quoFEM: str) -> str:
- _ERADistOpts = {}
+def _get_ERADistOpt(input_type_from_quoFEM: str) -> str: # noqa: N802, N803
+ _ERADistOpts = {} # noqa: N806
_ERADistOpts['Parameters'] = 'PAR'
_ERADistOpts['Moments'] = 'MOM'
_ERADistOpts['Dataset'] = 'DATA'
try:
opt = _ERADistOpts[input_type_from_quoFEM]
- except:
+ except: # noqa: E722
opt = 'PAR'
return opt
-class RVData(pydantic.BaseModel):
+class RVData(pydantic.BaseModel): # noqa: D101
distribution: _supported_distributions
name: str
- inputType: _supported_input_types = 'Parameters'
- refCount: int
+ inputType: _supported_input_types = 'Parameters' # noqa: N815
+ refCount: int # noqa: N815
value: str
- variableClass: _supported_variable_classes
+ variableClass: _supported_variable_classes # noqa: N815
ERAName: str = ''
ERAOpt: str = ''
ERAVal: list = []
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAName = _get_ERADistObjectName(self.distribution)
self.ERAOpt = _get_ERADistOpt(self.inputType)
return super().model_post_init(__context)
############################################
-class BetaUncertainData(RVData):
+class BetaUncertainData(RVData): # noqa: D101
lowerbound: float = 0.0
upperbound: float = 1.0
@pydantic.validator('upperbound')
- def upper_bound_not_bigger_than_lower_bound(v, values):
+ def upper_bound_not_bigger_than_lower_bound(v, values): # noqa: N805, D102
if 'lowerbound' in values and v <= values['lowerbound']:
raise ValueError(f"The upper bound must be bigger than the \
lower bound {values['lowerbound']}. \
- Got a value of {v}.")
+ Got a value of {v}.") # noqa: EM102, TRY003
return v
-class BetaParameters(BetaUncertainData):
+class BetaParameters(BetaUncertainData): # noqa: D101
alphas: pydantic.PositiveFloat
betas: pydantic.PositiveFloat
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.alphas, self.betas, self.lowerbound, self.upperbound]
return super().model_post_init(__context)
-class BetaMoments(BetaUncertainData):
+class BetaMoments(BetaUncertainData): # noqa: D101
mean: float
- standardDev: pydantic.PositiveFloat
+ standardDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.standardDev, self.lowerbound, self.upperbound]
return super().model_post_init(__context)
-class BetaDataset(BetaUncertainData):
- dataDir: str
+class BetaDataset(BetaUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [
np.genfromtxt(self.dataDir).tolist(),
[self.lowerbound, self.upperbound],
@@ -104,183 +104,183 @@ def model_post_init(self, __context: Any) -> None:
############################################
-class ChiSquaredUncertainData(RVData):
+class ChiSquaredUncertainData(RVData): # noqa: D101
pass
-class ChiSquaredParameters(ChiSquaredUncertainData):
+class ChiSquaredParameters(ChiSquaredUncertainData): # noqa: D101
k: Annotated[int, Field(ge=1)]
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.k]
return super().model_post_init(__context)
-class ChiSquaredMoments(ChiSquaredUncertainData):
+class ChiSquaredMoments(ChiSquaredUncertainData): # noqa: D101
mean: pydantic.PositiveFloat
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean]
return super().model_post_init(__context)
-class ChiSquaredDataset(ChiSquaredUncertainData):
- dataDir: str
+class ChiSquaredDataset(ChiSquaredUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class ExponentialUncertainData(RVData):
+class ExponentialUncertainData(RVData): # noqa: D101
pass
-class ExponentialParameters(ExponentialUncertainData):
+class ExponentialParameters(ExponentialUncertainData): # noqa: D101
lamda: pydantic.PositiveFloat = pydantic.Field(alias='lambda')
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.lamda]
return super().model_post_init(__context)
-class ExponentialMoments(ExponentialUncertainData):
+class ExponentialMoments(ExponentialUncertainData): # noqa: D101
mean: pydantic.PositiveFloat
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean]
return super().model_post_init(__context)
-class ExponentialDataset(ExponentialUncertainData):
- dataDir: str
+class ExponentialDataset(ExponentialUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class GammaUncertainData(RVData):
+class GammaUncertainData(RVData): # noqa: D101
pass
-class GammaParameters(GammaUncertainData):
+class GammaParameters(GammaUncertainData): # noqa: D101
k: pydantic.PositiveFloat
lamda: pydantic.PositiveFloat = pydantic.Field(alias='lambda')
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.lamda, self.k]
return super().model_post_init(__context)
-class GammaMoments(GammaUncertainData):
+class GammaMoments(GammaUncertainData): # noqa: D101
mean: pydantic.PositiveFloat
- standardDev: pydantic.PositiveFloat
+ standardDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.standardDev]
return super().model_post_init(__context)
-class GammaDataset(GammaUncertainData):
- dataDir: str
+class GammaDataset(GammaUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class GumbelUncertainData(RVData):
+class GumbelUncertainData(RVData): # noqa: D101
pass
-class GumbelParameters(GumbelUncertainData):
+class GumbelParameters(GumbelUncertainData): # noqa: D101
alphaparam: pydantic.PositiveFloat
betaparam: float
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.alphaparam, self.betaparam]
return super().model_post_init(__context)
-class GumbelMoments(GumbelUncertainData):
+class GumbelMoments(GumbelUncertainData): # noqa: D101
mean: float
- standardDev: pydantic.PositiveFloat
+ standardDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.standardDev]
return super().model_post_init(__context)
-class GumbelDataset(GumbelUncertainData):
- dataDir: str
+class GumbelDataset(GumbelUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class LognormalUncertainData(RVData):
+class LognormalUncertainData(RVData): # noqa: D101
pass
-class LognormalParameters(LognormalUncertainData):
+class LognormalParameters(LognormalUncertainData): # noqa: D101
lamda: float = pydantic.Field(alias='lambda')
zeta: pydantic.PositiveFloat
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.lamda, self.zeta]
return super().model_post_init(__context)
-class LognormalMoments(LognormalUncertainData):
+class LognormalMoments(LognormalUncertainData): # noqa: D101
mean: pydantic.PositiveFloat
- stdDev: pydantic.PositiveFloat
+ stdDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.stdDev]
return super().model_post_init(__context)
-class LognormalDataset(LognormalUncertainData):
- dataDir: str
+class LognormalDataset(LognormalUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class NormalUncertainData(RVData):
+class NormalUncertainData(RVData): # noqa: D101
pass
-class NormalParameters(NormalUncertainData):
+class NormalParameters(NormalUncertainData): # noqa: D101
mean: float
- stdDev: pydantic.PositiveFloat
+ stdDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.stdDev]
return super().model_post_init(__context)
-class NormalMoments(NormalUncertainData):
+class NormalMoments(NormalUncertainData): # noqa: D101
mean: float
- stdDev: pydantic.PositiveFloat
+ stdDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.stdDev]
return super().model_post_init(__context)
-class NormalDataset(NormalUncertainData):
- dataDir: str
+class NormalDataset(NormalUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
@@ -311,71 +311,71 @@ def model_post_init(self, __context: Any) -> None:
############################################
-class UniformUncertainData(RVData):
+class UniformUncertainData(RVData): # noqa: D101
pass
-class UniformParameters(UniformUncertainData):
+class UniformParameters(UniformUncertainData): # noqa: D101
lowerbound: float = 0.0
upperbound: float = 1.0
@pydantic.validator('upperbound')
- def upper_bound_not_bigger_than_lower_bound(v, values):
+ def upper_bound_not_bigger_than_lower_bound(v, values): # noqa: N805, D102
if 'lowerbound' in values and v <= values['lowerbound']:
raise ValueError(f"The upper bound must be bigger than the \
lower bound {values['lowerbound']}. \
- Got a value of {v}.")
+ Got a value of {v}.") # noqa: EM102, TRY003
return v
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.lowerbound, self.upperbound]
return super().model_post_init(__context)
-class UniformMoments(UniformUncertainData):
+class UniformMoments(UniformUncertainData): # noqa: D101
mean: float
- standardDev: pydantic.PositiveFloat
+ standardDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.standardDev]
return super().model_post_init(__context)
-class UniformDataset(UniformUncertainData):
- dataDir: str
+class UniformDataset(UniformUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
############################################
-class WeibullUncertainData(RVData):
+class WeibullUncertainData(RVData): # noqa: D101
pass
-class WeibullParameters(WeibullUncertainData):
+class WeibullParameters(WeibullUncertainData): # noqa: D101
scaleparam: pydantic.PositiveFloat
shapeparam: pydantic.PositiveFloat
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.scaleparam, self.shapeparam]
return super().model_post_init(__context)
-class WeibullMoments(WeibullUncertainData):
+class WeibullMoments(WeibullUncertainData): # noqa: D101
mean: pydantic.PositiveFloat
- standardDev: pydantic.PositiveFloat
+ standardDev: pydantic.PositiveFloat # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [self.mean, self.standardDev]
return super().model_post_init(__context)
-class WeibullDataset(WeibullUncertainData):
- dataDir: str
+class WeibullDataset(WeibullUncertainData): # noqa: D101
+ dataDir: str # noqa: N815
- def model_post_init(self, __context: Any) -> None:
+ def model_post_init(self, __context: Any) -> None: # noqa: D102
self.ERAVal = [np.genfromtxt(self.dataDir).tolist()]
return super().model_post_init(__context)
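End-to-end, these models are exercised roughly as follows (a sketch assuming pydantic v2, which the model_validate dispatch in uq_utilities implies; the field values are illustrative, not from the patch):

rv_json = {
    'distribution': 'Normal', 'name': 'E', 'inputType': 'Parameters',
    'refCount': 1, 'value': 'RV.E', 'variableClass': 'Uncertain',
    'mean': 205000.0, 'stdDev': 10250.0,
}
rv = NormalParameters.model_validate(rv_json)
# model_post_init fills the ERADist triple consumed downstream:
assert (rv.ERAName, rv.ERAOpt, rv.ERAVal) == ('normal', 'PAR', [205000.0, 10250.0])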
diff --git a/modules/performUQ/common/uq_utilities.py b/modules/performUQ/common/uq_utilities.py
index a828839fc..cfb8c01ab 100644
--- a/modules/performUQ/common/uq_utilities.py
+++ b/modules/performUQ/common/uq_utilities.py
@@ -1,4 +1,4 @@
-import glob
+import glob # noqa: INP001, D100
import os
import shutil
import subprocess
@@ -16,63 +16,63 @@
from numpy.typing import NDArray
-def _copytree(src, dst, symlinks=False, ignore=None):
- if not os.path.exists(dst):
- os.makedirs(dst)
+def _copytree(src, dst, symlinks=False, ignore=None): # noqa: FBT002
+ if not os.path.exists(dst): # noqa: PTH110
+ os.makedirs(dst) # noqa: PTH103
for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
+ s = os.path.join(src, item) # noqa: PTH118
+ d = os.path.join(dst, item) # noqa: PTH118
+ if os.path.isdir(s): # noqa: PTH112
_copytree(s, d, symlinks, ignore)
else:
try:
if (
- not os.path.exists(d)
- or os.stat(s).st_mtime - os.stat(d).st_mtime > 1
+ not os.path.exists(d) # noqa: PTH110
+ or os.stat(s).st_mtime - os.stat(d).st_mtime > 1 # noqa: PTH116
):
shutil.copy2(s, d)
- except Exception as ex:
+ except Exception as ex: # noqa: BLE001
msg = f'Could not copy {s}. The following error occurred: \n{ex}'
- return msg
+ return msg # noqa: RET504
return '0'
def _append_msg_in_out_file(msg, out_file_name: str = 'ops.out'):
- if glob.glob(out_file_name):
- with open(out_file_name) as text_file:
- error_FEM = text_file.read()
+ if glob.glob(out_file_name): # noqa: PTH207
+ with open(out_file_name) as text_file: # noqa: PTH123
+ error_FEM = text_file.read() # noqa: N806
- startingCharId = error_FEM.lower().find('error')
+ startingCharId = error_FEM.lower().find('error') # noqa: N806
if startingCharId > 0:
- startingCharId = max(0, startingCharId - 20)
- endingID = max(len(error_FEM), startingCharId + 200)
+ startingCharId = max(0, startingCharId - 20) # noqa: N806
+ endingID = min(len(error_FEM), startingCharId + 200) # noqa: N806
errmsg = error_FEM[startingCharId:endingID]
errmsg = errmsg.split(' ', 1)[1]
errmsg = errmsg[0 : errmsg.rfind(' ')]
msg += '\n'
msg += 'your model says...\n'
msg += '........\n' + errmsg + '\n........ \n'
- msg += 'to read more, see ' + os.path.join(os.getcwd(), out_file_name)
+ msg += 'to read more, see ' + os.path.join(os.getcwd(), out_file_name) # noqa: PTH109, PTH118
return msg
-class ModelEvaluationError(Exception):
+class ModelEvaluationError(Exception): # noqa: D101
def __init__(self, msg: str) -> None:
super().__init__(msg)
-class SimCenterWorkflowDriver:
+class SimCenterWorkflowDriver: # noqa: D101
def __init__(
self,
- full_path_of_tmpSimCenter_dir: str,
- list_of_dir_names_to_copy_files_from: list[str],
- list_of_rv_names: list[str],
+ full_path_of_tmpSimCenter_dir: str, # noqa: N803
+ list_of_dir_names_to_copy_files_from: list[str], # noqa: FA102
+ list_of_rv_names: list[str], # noqa: FA102
driver_filename: str,
length_of_results: int,
workdir_prefix: str = 'workdir',
- ignore_nans: bool = True,
+ ignore_nans: bool = True, # noqa: FBT001, FBT002
) -> None:
self.full_path_of_tmpSimCenter_dir = full_path_of_tmpSimCenter_dir
self.list_of_dir_names_to_copy_files_from = (
@@ -107,31 +107,31 @@ def _check_size_of_sample(self, sample_values: NDArray) -> None:
raise ModelEvaluationError(msg)
def _create_workdir(self, simulation_number: int) -> str:
- workdir = os.path.join(
+ workdir = os.path.join( # noqa: PTH118
self.full_path_of_tmpSimCenter_dir,
f'{self.workdir_prefix}.{simulation_number + 1}',
)
- if os.path.exists(workdir):
+ if os.path.exists(workdir): # noqa: PTH110
for root, dirs, files in os.walk(workdir):
for file in files:
try:
- os.chmod(os.path.join(root, file), 0o777)
- os.unlink(os.path.join(root, file))
- except:
+ os.chmod(os.path.join(root, file), 0o777) # noqa: S103, PTH101, PTH118
+ os.unlink(os.path.join(root, file)) # noqa: PTH108, PTH118
+ except: # noqa: PERF203, E722
msg = f'Could not remove file {file} from {workdir}.'
- raise ModelEvaluationError(msg)
- for dir in dirs:
+ raise ModelEvaluationError(msg) # noqa: B904
+ for dir in dirs: # noqa: A001
try:
- shutil.rmtree(os.path.join(root, dir))
- except:
+ shutil.rmtree(os.path.join(root, dir)) # noqa: PTH118
+ except: # noqa: PERF203, E722
msg = (
f'Could not remove directory {dir} '
f' from {workdir}.'
)
- raise ModelEvaluationError(msg)
+ raise ModelEvaluationError(msg) # noqa: B904
for src_dir in self.list_of_dir_names_to_copy_files_from:
- src = os.path.join(self.full_path_of_tmpSimCenter_dir, src_dir)
+ src = os.path.join(self.full_path_of_tmpSimCenter_dir, src_dir) # noqa: PTH118
msg = _copytree(src, workdir)
if msg != '0':
raise ModelEvaluationError(msg)
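Aside: on Python 3.8+, the standard library covers most of what _copytree does; a sketch (it merges into an existing destination but drops the mtime-based skip, so files are recopied unconditionally; the paths are illustrative):

import shutil

shutil.copytree('templatedir', 'workdir.1', dirs_exist_ok=True)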
@@ -143,35 +143,35 @@ def _create_params_file(self, sample_values: NDArray, workdir: str) -> None:
for i, rv in enumerate(self.list_of_rv_names):
list_of_strings_to_write.append(f'{rv} {sample_values[0][i]}')
try:
- with open(os.path.join(workdir, 'params.in'), 'w') as f:
+ with open(os.path.join(workdir, 'params.in'), 'w') as f: # noqa: PTH118, PTH123
f.write('\n'.join(list_of_strings_to_write))
- except Exception as ex:
- raise ModelEvaluationError(
- 'Failed to create params.in file in '
+ except Exception as ex: # noqa: BLE001
+ raise ModelEvaluationError( # noqa: B904, TRY003
+ 'Failed to create params.in file in ' # noqa: EM102
f' {workdir}. The following error occurred: \n{ex}'
)
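The file written here follows the conventional 'name value per line' params format; for rv names ['E', 'fy'] and a sample [[205000.0, 355.0]] (hypothetical values), params.in would read:

E 205000.0
fy 355.0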
def _execute_driver_file(self, workdir: str) -> None:
command = (
- f'{os.path.join(workdir, self.driver_filename)} '
+ f'{os.path.join(workdir, self.driver_filename)} ' # noqa: PTH118
' 1> model_eval.log 2>&1'
)
os.chdir(workdir)
- completed_process = subprocess.run(command, shell=True, check=False)
+ completed_process = subprocess.run(command, shell=True, check=False) # noqa: S602
try:
completed_process.check_returncode()
except subprocess.CalledProcessError as ex:
- returnStringList = ['Failed to run the model.']
+ returnStringList = ['Failed to run the model.'] # noqa: N806
returnStringList.append(
'The command to run the model was '
f' {ex.cmd}'
)
returnStringList.append(f'The return code was {ex.returncode}')
returnStringList.append(f'The following error occurred: \n{ex}')
- raise ModelEvaluationError('\n\n'.join(returnStringList))
+ raise ModelEvaluationError('\n\n'.join(returnStringList)) # noqa: B904
def _read_outputs_from_results_file(self, workdir: str) -> NDArray:
- if glob.glob('results.out'):
+ if glob.glob('results.out'): # noqa: PTH207
outputs = np.loadtxt('results.out', dtype=float).flatten()
else:
msg = f"Error running FEM: 'results.out' missing at {workdir}\n"
@@ -199,9 +199,9 @@ def _read_outputs_from_results_file(self, workdir: str) -> NDArray:
return outputs
- def evaluate_model_once(
+ def evaluate_model_once( # noqa: D102
self, simulation_number: int, sample_values: NDArray
- ) -> Union[str, NDArray]:
+ ) -> Union[str, NDArray]: # noqa: FA100
outputs = ''
try:
sample_values = np.atleast_2d(sample_values)
@@ -210,10 +210,10 @@ def evaluate_model_once(
self._create_params_file(sample_values, workdir)
self._execute_driver_file(workdir)
outputs = self._read_outputs_from_results_file(workdir)
- except Exception:
+ except Exception: # noqa: BLE001
exc_type, exc_value, exc_traceback = sys.exc_info()
outputs = (
- f'\nSimulation number: {simulation_number}\n'
+ f'\nSimulation number: {simulation_number}\n' # noqa: ISC003
+ f'Sample values: {sample_values}\n'
)
outputs += ''.join(
@@ -224,64 +224,64 @@ def evaluate_model_once(
return outputs
-class ParallelRunnerMultiprocessing:
+class ParallelRunnerMultiprocessing: # noqa: D101
def __init__(self, run_type: str = 'runningLocal') -> None:
self.run_type = run_type
self.num_processors = self.get_num_processors()
self.pool = self.get_pool()
- def get_num_processors(self) -> int:
+ def get_num_processors(self) -> int: # noqa: D102
num_processors = os.cpu_count()
if num_processors is None:
num_processors = 1
if num_processors < 1:
- raise ValueError(
- 'Number of processes must be at least 1. '
+ raise ValueError( # noqa: TRY003
+ 'Number of processes must be at least 1. ' # noqa: EM102
f' Got {num_processors}'
)
return num_processors
- def get_pool(self) -> Pool:
+ def get_pool(self) -> Pool: # noqa: D102
self.pool = Pool(processes=self.num_processors)
return self.pool
- def close_pool(self) -> None:
+ def close_pool(self) -> None: # noqa: D102
self.pool.close()
-def make_ERADist_object(name, opt, val) -> ERADist:
+def make_ERADist_object(name, opt, val) -> ERADist: # noqa: N802, D103
return ERADist(name=name, opt=opt, val=val)
-def create_one_marginal_distribution(rv_data) -> ERADist:
+def create_one_marginal_distribution(rv_data) -> ERADist: # noqa: D103
string = (
- f'quoFEM_RV_models.{rv_data["distribution"]}'
+ f'quoFEM_RV_models.{rv_data["distribution"]}' # noqa: ISC003
+ f'{rv_data["inputType"]}.model_validate({rv_data})'
)
- rv = eval(string)
+ rv = eval(string) # noqa: S307
return make_ERADist_object(name=rv.ERAName, opt=rv.ERAOpt, val=rv.ERAVal)
-def make_list_of_marginal_distributions(
+def make_list_of_marginal_distributions( # noqa: D103
list_of_random_variables_data,
-) -> list[ERADist]:
- marginal_ERAdistribution_objects_list = []
+) -> list[ERADist]: # noqa: FA102
+ marginal_ERAdistribution_objects_list = [] # noqa: N806
for rv_data in list_of_random_variables_data:
- marginal_ERAdistribution_objects_list.append(
+ marginal_ERAdistribution_objects_list.append( # noqa: PERF401
create_one_marginal_distribution(rv_data)
)
return marginal_ERAdistribution_objects_list
-def make_correlation_matrix(correlation_matrix_data, num_rvs) -> NDArray:
+def make_correlation_matrix(correlation_matrix_data, num_rvs) -> NDArray: # noqa: D103
return np.atleast_2d(correlation_matrix_data).reshape((num_rvs, num_rvs))
-def make_ERANataf_object(list_of_ERADist, correlation_matrix) -> ERANataf:
+def make_ERANataf_object(list_of_ERADist, correlation_matrix) -> ERANataf: # noqa: N802, N803, D103
return ERANataf(M=list_of_ERADist, Correlation=correlation_matrix)
-class ERANatafJointDistribution:
+class ERANatafJointDistribution: # noqa: D101
def __init__(
self,
list_of_random_variables_data: list,
@@ -301,37 +301,37 @@ def __init__(
self.marginal_ERAdistribution_objects_list, self.correlation_matrix
)
- def u_to_x(
+ def u_to_x( # noqa: D102
self,
u: NDArray,
- jacobian: bool = False,
- ) -> Union[tuple[NDArray[np.float64], Any], NDArray[np.float64]]:
+ jacobian: bool = False, # noqa: FBT001, FBT002
+ ) -> Union[tuple[NDArray[np.float64], Any], NDArray[np.float64]]: # noqa: FA100, FA102
return self.ERANataf_object.U2X(U=u, Jacobian=jacobian)
- def x_to_u(
+ def x_to_u( # noqa: D102
self,
x: NDArray,
- jacobian: bool = False,
- ) -> Union[
- tuple[NDArray[np.floating[Any]], NDArray[np.floating[Any]]],
+ jacobian: bool = False, # noqa: FBT001, FBT002
+ ) -> Union[ # noqa: FA100
+ tuple[NDArray[np.floating[Any]], NDArray[np.floating[Any]]], # noqa: FA102
NDArray[np.floating[Any]],
]:
return self.ERANataf_object.X2U(X=x, Jacobian=jacobian)
- def pdf(self, x: NDArray) -> Union[Any, NDArray[np.float64]]:
+ def pdf(self, x: NDArray) -> Union[Any, NDArray[np.float64]]: # noqa: FA100, D102
return self.ERANataf_object.pdf(X=x)
- def logpdf(self, x: NDArray) -> NDArray[np.float64]:
+ def logpdf(self, x: NDArray) -> NDArray[np.float64]: # noqa: D102
return np.log(self.pdf(x))
- def cdf(self, x: NDArray) -> float:
+ def cdf(self, x: NDArray) -> float: # noqa: D102
return self.ERANataf_object.cdf(X=x)
- def random(
+ def random( # noqa: D102
self,
- list_of_rngs: list[np.random.Generator] = [],
+ list_of_rngs: list[np.random.Generator] = [], # noqa: B006, FA102
n: int = 1,
- ) -> Union[tuple[NDArray[np.float64], Any], NDArray[np.float64]]:
+ ) -> Union[tuple[NDArray[np.float64], Any], NDArray[np.float64]]: # noqa: FA100, FA102
if list_of_rngs == []:
list_of_rngs = [
np.random.default_rng(seed=i)
@@ -343,36 +343,36 @@ def random(
return self.u_to_x(u)
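The B006 suppression above flags the mutable-default pitfall rather than fixing it; the conventional fix is a None sentinel, sketched here (not the shipped signature):

import numpy as np

def make_rngs(list_of_rngs=None, n=3):
    # A fresh list per call; a [] default would be shared across calls.
    if list_of_rngs is None:
        list_of_rngs = [np.random.default_rng(seed=i) for i in range(n)]
    return list_of_rngs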
-def get_list_of_pseudo_random_number_generators(entropy, num_spawn):
+def get_list_of_pseudo_random_number_generators(entropy, num_spawn): # noqa: D103
seed_sequence = np.random.SeedSequence(entropy=entropy).spawn(num_spawn)
prngs = [np.random.Generator(np.random.PCG64DXSM(s)) for s in seed_sequence]
- return prngs
+ return prngs # noqa: RET504
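What this helper buys you: one entropy value fans out into statistically independent, reproducible streams via SeedSequence.spawn, one PCG64DXSM generator per worker. A usage sketch:

prngs = get_list_of_pseudo_random_number_generators(entropy=42, num_spawn=4)
draws = [prng.standard_normal(3) for prng in prngs]  # independent per-stream draws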
-def get_parallel_pool_instance(run_type: str):
+def get_parallel_pool_instance(run_type: str): # noqa: D103
if run_type == 'runningRemote':
from parallel_runner_mpi4py import ParallelRunnerMPI4PY
return ParallelRunnerMPI4PY(run_type)
- else:
+ else: # noqa: RET505
return ParallelRunnerMultiprocessing(run_type)
-def make_list_of_rv_names(all_rv_data):
+def make_list_of_rv_names(all_rv_data): # noqa: D103
list_of_rv_names = []
for rv_data in all_rv_data:
- list_of_rv_names.append(rv_data['name'])
+ list_of_rv_names.append(rv_data['name']) # noqa: PERF401
return list_of_rv_names
-def get_length_of_results(edp_data):
+def get_length_of_results(edp_data): # noqa: D103
length_of_results = 0
for edp in edp_data:
length_of_results += int(float(edp['length']))
return length_of_results
-def create_default_model(
+def create_default_model( # noqa: D103
run_directory,
list_of_dir_names_to_copy_files_from,
list_of_rv_names,
@@ -388,29 +388,29 @@ def create_default_model(
length_of_results=length_of_results,
workdir_prefix=workdir_prefix,
)
- return model
+ return model # noqa: RET504
-def get_default_model_evaluation_function(model):
+def get_default_model_evaluation_function(model): # noqa: D103
return model.evaluate_model_once
-def get_ERANataf_joint_distribution_instance(
+def get_ERANataf_joint_distribution_instance( # noqa: N802, D103
list_of_rv_data,
correlation_matrix_data,
):
joint_distribution = ERANatafJointDistribution(
list_of_rv_data, correlation_matrix_data
)
- return joint_distribution
+ return joint_distribution # noqa: RET504
-def get_std_normal_to_rv_transformation_function(joint_distribution):
+def get_std_normal_to_rv_transformation_function(joint_distribution): # noqa: D103
transformation_function = joint_distribution.u_to_x
- return transformation_function
+ return transformation_function # noqa: RET504
-def get_default_model(
+def get_default_model( # noqa: D103
list_of_rv_data,
edp_data,
list_of_dir_names_to_copy_files_from,
@@ -420,9 +420,9 @@ def get_default_model(
):
list_of_rv_names = make_list_of_rv_names(list_of_rv_data)
length_of_results = get_length_of_results(edp_data)
- list_of_dir_names_to_copy_files_from = list_of_dir_names_to_copy_files_from
- driver_filename = driver_filename
- workdir_prefix = workdir_prefix
+ list_of_dir_names_to_copy_files_from = list_of_dir_names_to_copy_files_from # noqa: PLW0127
+ driver_filename = driver_filename # noqa: PLW0127
+ workdir_prefix = workdir_prefix # noqa: PLW0127
model = create_default_model(
run_directory,
@@ -432,33 +432,33 @@ def get_default_model(
length_of_results,
workdir_prefix,
)
- return model
+ return model # noqa: RET504
-def model_evaluation_function(
+def model_evaluation_function( # noqa: D103
func,
list_of_iterables,
):
return func(*list_of_iterables)
-def get_random_number_generators(entropy, num_prngs):
+def get_random_number_generators(entropy, num_prngs): # noqa: D103
return get_list_of_pseudo_random_number_generators(entropy, num_prngs)
-def get_standard_normal_random_variates(list_of_prngs, size=1):
+def get_standard_normal_random_variates(list_of_prngs, size=1): # noqa: D103
return [prng.standard_normal(size=size) for prng in list_of_prngs]
-def get_inverse_gamma_random_variate(prng, shape, scale, size=1):
+def get_inverse_gamma_random_variate(prng, shape, scale, size=1): # noqa: D103
return scipy.stats.invgamma.rvs(shape, scale=scale, size=size, random_state=prng)
-def multivariate_normal_logpdf(x, mean, cov):
+def multivariate_normal_logpdf(x, mean, cov): # noqa: D103
eigenvalues, eigenvectors = np.linalg.eigh(cov)
logdet = np.sum(np.log(eigenvalues))
valsinv = 1.0 / eigenvalues
- U = eigenvectors * np.sqrt(valsinv)
+ U = eigenvectors * np.sqrt(valsinv) # noqa: N806
dim = len(eigenvalues)
dev = x - mean
maha = np.square(dev.T @ U).sum()
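The eigendecomposition route: with cov = V diag(lam) V^T, U = V diag(1/sqrt(lam)) gives the Mahalanobis term maha = dev^T cov^{-1} dev = ||U^T dev||^2, and the Gaussian log-density is -0.5 * (d*log(2*pi) + logdet + maha). A numeric cross-check against scipy (assuming the return elided below is exactly that expression):

import numpy as np
from scipy.stats import multivariate_normal

x, mean = np.array([0.3, -1.2]), np.zeros(2)
cov = np.array([[2.0, 0.5], [0.5, 1.0]])

eigenvalues, eigenvectors = np.linalg.eigh(cov)
U = eigenvectors * np.sqrt(1.0 / eigenvalues)
maha = np.square((x - mean).T @ U).sum()
logpdf = -0.5 * (2 * np.log(2 * np.pi) + np.sum(np.log(eigenvalues)) + maha)
assert np.isclose(logpdf, multivariate_normal.logpdf(x, mean, cov))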
@@ -467,7 +467,7 @@ def multivariate_normal_logpdf(x, mean, cov):
@dataclass
-class NormalInverseWishartParameters:
+class NormalInverseWishartParameters: # noqa: D101
mu_vector: npt.NDArray
lambda_scalar: float
nu_scalar: float
@@ -475,7 +475,7 @@ class NormalInverseWishartParameters:
@dataclass
-class InverseGammaParameters:
+class InverseGammaParameters: # noqa: D101
alpha_scalar: float
beta_scalar: float
@@ -495,7 +495,7 @@ def _get_tabular_results_file_name_for_dataset(
tabular_results_parent
/ f'{tabular_results_stem}_dataset_{dataset_number + 1}{tabular_results_extension}'
)
- return tabular_results_file
+ return tabular_results_file # noqa: RET504
def _write_to_tabular_results_file(tabular_results_file, string_to_write):
diff --git a/modules/performUQ/dakota/DakotaFEM.py b/modules/performUQ/dakota/DakotaFEM.py
index 73dd3f8a5..9529299f3 100644
--- a/modules/performUQ/dakota/DakotaFEM.py
+++ b/modules/performUQ/dakota/DakotaFEM.py
@@ -1,8 +1,8 @@
-import json
+import json # noqa: INP001, D100
import os
import sys
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import argparse
import platform
@@ -14,25 +14,25 @@
from preprocessJSON import preProcessDakota
-def str2bool(v):
+def str2bool(v): # noqa: D103
# courtesy of Maxim @ stackoverflow
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
- elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'):
+ elif v.lower() in ('no', 'false', 'f', 'n', '0'): # noqa: RET505
return False
else:
- raise argparse.ArgumentTypeError('Boolean value expected.')
+ raise argparse.ArgumentTypeError('Boolean value expected.') # noqa: EM101, TRY003
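Usage sketch for str2bool as an argparse converter (the flag name is illustrative; str2bool is the function defined above):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--keepSamples', type=str2bool, default=True)
print(parser.parse_args(['--keepSamples', 'no']).keepSamples)  # False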
-def main(args):
+def main(args): # noqa: C901, D103
# First we need to set the path and environment
- home = os.path.expanduser('~')
+ home = os.path.expanduser('~') # noqa: PTH111
env = os.environ
if os.getenv('PEGASUS_WF_UUID') is not None:
- print('Pegasus job detected - Pegasus will set up the env')
+ print('Pegasus job detected - Pegasus will set up the env') # noqa: T201
elif platform.system() == 'Darwin':
env['PATH'] = env['PATH'] + f':{home}/bin'
env['PATH'] = env['PATH'] + f':{home}/dakota/bin'
@@ -42,7 +42,7 @@ def main(args):
elif platform.system() == 'Windows':
pass
else:
- print(f'PLATFORM {platform.system} NOT RECOGNIZED')
+ print(f'PLATFORM {platform.system} NOT RECOGNIZED') # noqa: T201
parser = argparse.ArgumentParser()
@@ -72,14 +72,14 @@ def main(args):
args, unknowns = parser.parse_known_args()
# Reading input arguments
- aimName = args.filenameAIM
- samName = args.filenameSAM
- evtName = args.filenameEVENT
- edpName = args.filenameEDP
- simName = args.filenameSIM
- driverFile = args.driverFile
-
- uqData = dict(
+ aimName = args.filenameAIM # noqa: N806
+ samName = args.filenameSAM # noqa: N806
+ evtName = args.filenameEVENT # noqa: N806
+ edpName = args.filenameEDP # noqa: N806
+ simName = args.filenameSIM # noqa: N806
+ driverFile = args.driverFile # noqa: N806
+
+ uqData = dict( # noqa: C408, N806
method=args.method,
samples=args.samples,
samples2=args.samples2,
@@ -95,36 +95,36 @@ def main(args):
if (
uqData['samples'] is None
): # this happens when the uq details are stored at the wrong place in the AIM file
- with open(aimName, encoding='utf-8') as data_file:
+ with open(aimName, encoding='utf-8') as data_file: # noqa: PTH123
uq_info = json.load(data_file)['UQ']
- if 'samplingMethodData' in uq_info.keys():
+ if 'samplingMethodData' in uq_info.keys(): # noqa: SIM118
uq_info = uq_info['samplingMethodData']
for attribute in uqData:
if attribute not in ['concurrency', 'keepSamples']:
uqData[attribute] = uq_info.get(attribute, None)
- runDakota = args.runType
+ runDakota = args.runType # noqa: N806
# Run Preprocess for Dakota
- scriptDir = os.path.dirname(os.path.realpath(__file__))
- numRVs = preProcessDakota(
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806, F841
+ numRVs = preProcessDakota( # noqa: N806, F841
aimName, evtName, samName, edpName, simName, driverFile, runDakota, uqData
)
# Setting Workflow Driver Name
- workflowDriverName = 'workflow_driver'
+ workflowDriverName = 'workflow_driver' # noqa: N806
if (platform.system() == 'Windows') and (runDakota == 'run'):
- workflowDriverName = 'workflow_driver.bat'
+ workflowDriverName = 'workflow_driver.bat' # noqa: N806
# Create Template Directory and copy files
- st = os.stat(workflowDriverName)
- os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC)
+ st = os.stat(workflowDriverName) # noqa: PTH116
+ os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC) # noqa: PTH101
# shutil.copy(workflowDriverName, "templatedir")
# shutil.copy("{}/dpreproSimCenter".format(scriptDir), os.getcwd())
shutil.move(aimName, 'aim.j')
shutil.move(evtName, 'evt.j')
- if os.path.isfile(samName):
+ if os.path.isfile(samName): # noqa: PTH113
shutil.move(samName, 'sam.j')
shutil.move(edpName, 'edp.j')
# if os.path.isfile(simName): shutil.move(simName, "sim.j")
@@ -136,12 +136,12 @@ def main(args):
os.chdir('../')
if runDakota == 'run':
- dakotaCommand = (
+ dakotaCommand = ( # noqa: N806
'dakota -input dakota.in -output dakota.out -error dakota.err'
)
- print('running Dakota: ', dakotaCommand)
+ print('running Dakota: ', dakotaCommand) # noqa: T201
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
dakotaCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
@@ -153,7 +153,7 @@ def main(args):
if platform.system() == 'Windows':
result = result.decode(sys.stdout.encoding)
- print(result, returncode)
+ print(result, returncode) # noqa: T201
if __name__ == '__main__':
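Aside: the S602 suppressions above acknowledge the shell=True risk; an equivalent shell-free sketch of the same Dakota invocation (not part of the patch):

import subprocess

result = subprocess.run(
    ['dakota', '-input', 'dakota.in', '-output', 'dakota.out', '-error', 'dakota.err'],
    capture_output=True, check=False,
)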
diff --git a/modules/performUQ/dakota/DakotaFEM1.py b/modules/performUQ/dakota/DakotaFEM1.py
index 13500954d..a6f7cca18 100644
--- a/modules/performUQ/dakota/DakotaFEM1.py
+++ b/modules/performUQ/dakota/DakotaFEM1.py
@@ -1,14 +1,14 @@
-# import functions for Python 2.X support
+# import functions for Python 2.X support # noqa: INP001, D100
import os
import sys
if sys.version.startswith('2'):
- range = xrange
- string_types = basestring
+ range = xrange # noqa: A001, F821
+ string_types = basestring # noqa: F821
else:
string_types = str
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))) # noqa: PTH120
import argparse
import platform
@@ -20,12 +20,12 @@
from preprocessJSON import preProcessDakota
-def main(args):
+def main(args): # noqa: D103
# First we need to set the path and environment
- home = os.path.expanduser('~')
+ home = os.path.expanduser('~') # noqa: PTH111
env = os.environ
if os.getenv('PEGASUS_WF_UUID') is not None:
- print('Pegasus job detected - Pegasus will set up the env')
+ print('Pegasus job detected - Pegasus will set up the env') # noqa: T201
elif platform.system() == 'Darwin':
env['PATH'] = env['PATH'] + f':{home}/bin'
env['PATH'] = env['PATH'] + f':{home}/dakota/bin'
@@ -35,7 +35,7 @@ def main(args):
elif platform.system() == 'Windows':
pass
else:
- print(f'PLATFORM {platform.system} NOT RECOGNIZED')
+ print(f'PLATFORM {platform.system} NOT RECOGNIZED') # noqa: T201
parser = argparse.ArgumentParser()
@@ -68,14 +68,14 @@ def main(args):
args, unknowns = parser.parse_known_args()
# Reading input arguments
- aimName = args.filenameBIM
- samName = args.filenameSAM
- evtName = args.filenameEVENT
- edpName = args.filenameEDP
- simName = args.filenameSIM
- driverFile = args.driverFile
-
- uqData = dict(
+ aimName = args.filenameBIM # noqa: N806
+ samName = args.filenameSAM # noqa: N806
+ evtName = args.filenameEVENT # noqa: N806
+ edpName = args.filenameEDP # noqa: N806
+ simName = args.filenameSIM # noqa: N806
+ driverFile = args.driverFile # noqa: N806
+
+ uqData = dict( # noqa: C408, N806
method=args.method,
samples=args.samples,
seed=args.seed,
@@ -92,23 +92,23 @@ def main(args):
not in ['False', 'false', False],
)
- runDakota = args.runType
+ runDakota = args.runType # noqa: N806
- myScriptDir = os.path.dirname(os.path.realpath(__file__))
+ myScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
# desktop applications
if (
uqData['samples'] is None
): # this happens with new applications, workflow to change
- print('RUNNING PREPROCESSOR\n')
- osType = platform.system()
- preprocessorCommand = f'"{myScriptDir}/preprocessDakota" {aimName} {samName} {evtName} {edpName} {simName} {driverFile} {runDakota} {osType}'
- subprocess.Popen(preprocessorCommand, shell=True).wait()
- print('DONE RUNNING PREPROCESSOR\n')
+ print('RUNNING PREPROCESSOR\n') # noqa: T201
+ osType = platform.system() # noqa: N806
+ preprocessorCommand = f'"{myScriptDir}/preprocessDakota" {aimName} {samName} {evtName} {edpName} {simName} {driverFile} {runDakota} {osType}' # noqa: N806
+ subprocess.Popen(preprocessorCommand, shell=True).wait() # noqa: S602
+ print('DONE RUNNING PREPROCESSOR\n') # noqa: T201
else:
- scriptDir = os.path.dirname(os.path.realpath(__file__))
- numRVs = preProcessDakota(
+ scriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806, F841
+ numRVs = preProcessDakota( # noqa: N806, F841
aimName,
evtName,
samName,
@@ -121,18 +121,18 @@ def main(args):
shutil.move(aimName, 'aim.j')
shutil.move(evtName, 'evt.j')
- if os.path.isfile(samName):
+ if os.path.isfile(samName): # noqa: PTH113
shutil.move(samName, 'sam.j')
shutil.move(edpName, 'edp.j')
# Setting Workflow Driver Name
- workflowDriverName = 'workflow_driver'
+ workflowDriverName = 'workflow_driver' # noqa: N806
if (platform.system() == 'Windows') and (runDakota == 'runningLocal'):
- workflowDriverName = 'workflow_driver.bat'
+ workflowDriverName = 'workflow_driver.bat' # noqa: N806
# Change permission of workflow driver
- st = os.stat(workflowDriverName)
- os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC)
+ st = os.stat(workflowDriverName) # noqa: PTH116
+ os.chmod(workflowDriverName, st.st_mode | stat.S_IEXEC) # noqa: PTH101
# copy the dakota input file to the main working dir for the structure
shutil.move('dakota.in', '../')
@@ -141,18 +141,18 @@ def main(args):
os.chdir('../')
if runDakota == 'runningLocal':
- dakotaCommand = (
+ dakotaCommand = ( # noqa: N806
'dakota -input dakota.in -output dakota.out -error dakota.err'
)
- print('running Dakota: ', dakotaCommand)
+ print('running Dakota: ', dakotaCommand) # noqa: T201
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
dakotaCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
except subprocess.CalledProcessError as e:
- result = e.output
- returncode = e.returncode
+ result = e.output # noqa: F841
+ returncode = e.returncode # noqa: F841
if __name__ == '__main__':
diff --git a/modules/performUQ/dakota/DakotaUQ.py b/modules/performUQ/dakota/DakotaUQ.py
index 7718b86bf..318cd12b7 100644
--- a/modules/performUQ/dakota/DakotaUQ.py
+++ b/modules/performUQ/dakota/DakotaUQ.py
@@ -1,4 +1,4 @@
-# written: UQ team @ SimCenter
+# written: UQ team @ SimCenter # noqa: INP001, D100
# import functions for Python 2.X support
# from __future__ import division, print_function
@@ -19,7 +19,7 @@
import sys
-def main(args):
+def main(args): # noqa: C901, D103
parser = argparse.ArgumentParser()
parser.add_argument('--workflowInput')
@@ -29,78 +29,78 @@ def main(args):
args, unknowns = parser.parse_known_args()
- inputFile = args.workflowInput
- runType = args.runType
+ inputFile = args.workflowInput # noqa: N806
+ runType = args.runType # noqa: N806
workflow_driver = args.driverFile
- outputFile = args.workflowOutput
+ outputFile = args.workflowOutput # noqa: N806, F841
#
# open input file and check for any rvFiles
# - need to know in case need to modify driver file
#
- with open(inputFile, encoding='utf-8') as f:
+ with open(inputFile, encoding='utf-8') as f: # noqa: PTH123
data = json.load(f)
workflow_driver1 = 'blank'
# run on local computer
- osType = platform.system()
+ osType = platform.system() # noqa: N806
if runType == 'runningLocal':
if (
sys.platform == 'darwin'
or sys.platform == 'linux'
or sys.platform == 'linux2'
):
- Dakota = 'dakota'
+ Dakota = 'dakota' # noqa: N806
workflow_driver1 = 'workflow_driver1'
- osType = 'Linux'
+ osType = 'Linux' # noqa: N806
else:
- Dakota = 'dakota'
+ Dakota = 'dakota' # noqa: N806
workflow_driver = workflow_driver + '.bat'
workflow_driver1 = 'workflow_driver1.bat'
- osType = 'Windows'
+ osType = 'Windows' # noqa: N806
elif runType == 'runningRemote':
- Dakota = 'dakota'
+ Dakota = 'dakota' # noqa: N806
workflow_driver1 = 'workflow_driver1'
- osType = 'Linux'
+ osType = 'Linux' # noqa: N806
- cwd = os.getcwd()
- print('CWD: ' + cwd)
+ cwd = os.getcwd() # noqa: PTH109
+ print('CWD: ' + cwd) # noqa: T201
- thisScriptDir = os.path.dirname(os.path.realpath(__file__))
+ thisScriptDir = os.path.dirname(os.path.realpath(__file__)) # noqa: PTH120, N806
- preprocessorCommand = f'"{thisScriptDir}/preprocessDakota" "{inputFile}" "{workflow_driver}" "{workflow_driver1}" "{runType}" "{osType}" '
+ preprocessorCommand = f'"{thisScriptDir}/preprocessDakota" "{inputFile}" "{workflow_driver}" "{workflow_driver1}" "{runType}" "{osType}" ' # noqa: N806
- subprocess.Popen(preprocessorCommand, shell=True).wait()
+ subprocess.Popen(preprocessorCommand, shell=True).wait() # noqa: S602
if runType == 'runningLocal':
- os.chmod(
+ os.chmod( # noqa: PTH101
workflow_driver,
stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH,
)
- os.chmod(
+ os.chmod( # noqa: PTH101
workflow_driver1,
stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR | stat.S_IXOTH,
)
- command = Dakota + ' -input dakota.in -output dakota.out -error dakota.err'
+ command = Dakota + ' -input dakota.in -output dakota.out -error dakota.err' # noqa: F841
# Change permission of workflow driver
- st = os.stat(workflow_driver)
- os.chmod(workflow_driver, st.st_mode | stat.S_IEXEC)
- os.chmod(workflow_driver1, st.st_mode | stat.S_IEXEC)
+ st = os.stat(workflow_driver) # noqa: PTH116
+ os.chmod(workflow_driver, st.st_mode | stat.S_IEXEC) # noqa: PTH101
+ os.chmod(workflow_driver1, st.st_mode | stat.S_IEXEC) # noqa: PTH101
# copy the dakota input file to the main working dir for the structure
shutil.copy('dakota.in', '../')
# If calibration data files exist, copy to the main working directory
- if os.path.isfile('calibrationDataFilesToMove.cal'):
- calDataFileList = open('calibrationDataFilesToMove.cal')
- datFileList = calDataFileList.readlines()
+ if os.path.isfile('calibrationDataFilesToMove.cal'): # noqa: PTH113
+ calDataFileList = open('calibrationDataFilesToMove.cal') # noqa: SIM115, PTH123, N806
+ datFileList = calDataFileList.readlines() # noqa: N806
for line in datFileList:
- datFile = line.strip()
+ datFile = line.strip() # noqa: N806
if datFile.split('.')[-1] == 'tmpFile':
shutil.copy(datFile, f'../{datFile[:-8]}')
else:
@@ -111,41 +111,41 @@ def main(args):
# change dir to the main working dir for the structure
os.chdir('../')
- cwd = os.getcwd()
+ cwd = os.getcwd() # noqa: PTH109
if runType == 'runningLocal':
# p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
# for line in p.stdout:
# print(str(line))
- dakotaCommand = (
+ dakotaCommand = ( # noqa: N806
'dakota -input dakota.in -output dakota.out -error dakota.err'
)
if 'parType' in data:
- print(data['parType'])
+ print(data['parType']) # noqa: T201
if data['parType'] == 'parRUN':
- dakotaCommand = data['mpiExec'] + ' -n 1 ' + dakotaCommand
+ dakotaCommand = data['mpiExec'] + ' -n 1 ' + dakotaCommand # noqa: N806
- print('running Dakota: ', dakotaCommand)
+ print('running Dakota: ', dakotaCommand) # noqa: T201
try:
- result = subprocess.check_output(
+ result = subprocess.check_output( # noqa: S602
dakotaCommand, stderr=subprocess.STDOUT, shell=True
)
returncode = 0
except subprocess.CalledProcessError as e:
result = e.output
- print('RUNNING DAKOTA ERROR: ', result)
- returncode = e.returncode
+ print('RUNNING DAKOTA ERROR: ', result) # noqa: T201
+ returncode = e.returncode # noqa: F841
- dakotaErrFile = os.path.join(os.getcwd(), 'dakota.err')
- dakotaOutFile = os.path.join(os.getcwd(), 'dakota.out')
- checkErrFile = os.path.getsize(dakotaErrFile)
- checkOutFile = os.path.exists(dakotaOutFile)
+ dakotaErrFile = os.path.join(os.getcwd(), 'dakota.err') # noqa: PTH109, PTH118, N806
+ dakotaOutFile = os.path.join(os.getcwd(), 'dakota.out') # noqa: PTH109, PTH118, N806
+ checkErrFile = os.path.getsize(dakotaErrFile) # noqa: PTH202, N806
+ checkOutFile = os.path.exists(dakotaOutFile) # noqa: PTH110, N806
- if checkOutFile == False and checkErrFile == 0:
- with open(dakotaErrFile, 'a') as file:
+ if checkOutFile == False and checkErrFile == 0: # noqa: E712
+ with open(dakotaErrFile, 'a') as file: # noqa: PTH123
file.write(result.decode('utf-8'))
else:
pass
diff --git a/modules/performUQ/other/HeirBayesRunner.py b/modules/performUQ/other/HeirBayesRunner.py
index a2ea9097a..078558479 100644
--- a/modules/performUQ/other/HeirBayesRunner.py
+++ b/modules/performUQ/other/HeirBayesRunner.py
@@ -1,4 +1,4 @@
-# written: Aakash Bangalore Satish @ NHERI SimCenter, UC Berkeley
+# written: Aakash Bangalore Satish @ NHERI SimCenter, UC Berkeley # noqa: INP001, D100
import importlib
import json
@@ -9,7 +9,7 @@
from uqRunner import UqRunner
-class HeirBayesRunner(UqRunner):
+class HeirBayesRunner(UqRunner): # noqa: D101
def __init__(self) -> None:
super().__init__()
self.n_samples = 0
@@ -17,7 +17,7 @@ def __init__(self) -> None:
self.tuning_interval = 0
self.seed = 0
- def storeUQData(self, uqData):
+ def storeUQData(self, uqData): # noqa: N802, N803, D102
for val in uqData['Parameters']:
if val['name'] == 'File To Run':
self.file_to_run = val['value']
@@ -30,10 +30,10 @@ def storeUQData(self, uqData):
elif val['name'] == 'Seed':
self.seed = int(val['value'])
- def performHeirBayesSampling(self):
- self.dir_name = os.path.dirname(self.file_to_run)
+ def performHeirBayesSampling(self): # noqa: N802, D102
+ self.dir_name = os.path.dirname(self.file_to_run) # noqa: PTH120
sys.path.append(self.dir_name)
- module_name = os.path.basename(self.file_to_run)
+ module_name = os.path.basename(self.file_to_run) # noqa: PTH119
module = importlib.import_module(module_name[:-3])
self.heir_code = module.HeirBayesSampler()
@@ -46,12 +46,12 @@ def performHeirBayesSampling(self):
)
)
- def saveResultsToPklFile(self):
+ def saveResultsToPklFile(self): # noqa: N802, D102
self.saved_pickle_filename = self.heir_code.save_results(
self.trace, self.time_taken, self.inf_object, prefix='synthetic_data'
)
- def createHeadingStringsList(self):
+ def createHeadingStringsList(self): # noqa: N802, D102
self.params = ['fy', 'E', 'b', 'cR1', 'cR2', 'a1', 'a3']
self.num_params = len(self.params)
@@ -69,15 +69,15 @@ def createHeadingStringsList(self):
)
for par in self.params:
- self.heading_list.append(''.join(['Mean_', par]))
+ self.heading_list.append(''.join(['Mean_', par])) # noqa: FLY002
for sig in range(self.num_coupons):
self.heading_list.append(''.join(['ErrorVariance_', str(sig + 1)]))
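The FLY002 suppression above flags static joins; the idiomatic equivalent is an f-string (a sketch with illustrative values):

params = ['fy', 'E']
heading_list = [f'Mean_{par}' for par in params]  # ['Mean_fy', 'Mean_E']
heading_list += [f'ErrorVariance_{sig + 1}' for sig in range(2)]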
- def makeHeadingRow(self, separator='\t'):
- self.headingRow = separator.join([item for item in self.heading_list])
+ def makeHeadingRow(self, separator='\t'): # noqa: N802, D102
+ self.headingRow = separator.join([item for item in self.heading_list]) # noqa: C416
- def makeOneRowString(self, sample_num, sample, separator='\t'):
+ def makeOneRowString(self, sample_num, sample, separator='\t'): # noqa: N802, D102
initial_string = separator.join([str(sample_num), '1'])
coupon_string = separator.join(
[
@@ -105,9 +105,9 @@ def makeOneRowString(self, sample_num, sample, separator='\t'):
row_string = separator.join(
[initial_string, coupon_string, cov_string, mean_string, error_string]
)
- return row_string
+ return row_string # noqa: RET504
- def makeTabularResultsFile(
+ def makeTabularResultsFile( # noqa: N802, D102
self,
save_file_name='tabularResults.out',
separator='\t',
@@ -115,10 +115,10 @@ def makeTabularResultsFile(
self.createHeadingStringsList()
self.makeHeadingRow(separator=separator)
- cwd = os.getcwd()
- save_file_dir = os.path.dirname(cwd)
- save_file_full_path = os.path.join(save_file_dir, save_file_name)
- with open(save_file_full_path, 'w') as f:
+ cwd = os.getcwd() # noqa: PTH109
+ save_file_dir = os.path.dirname(cwd) # noqa: PTH120
+ save_file_full_path = os.path.join(save_file_dir, save_file_name) # noqa: PTH118
+ with open(save_file_full_path, 'w') as f: # noqa: PTH123
f.write(self.headingRow)
f.write('\n')
for sample_num, sample in enumerate(self.trace):
@@ -128,43 +128,43 @@ def makeTabularResultsFile(
f.write(row)
f.write('\n')
- def startTimer(self):
+ def startTimer(self): # noqa: N802, D102
self.startingTime = time.time()
- def computeTimeElapsed(self):
+ def computeTimeElapsed(self): # noqa: N802, D102
self.timeElapsed = time.time() - self.startingTime
- def printTimeElapsed(self):
+ def printTimeElapsed(self): # noqa: N802, D102
self.computeTimeElapsed()
- print(f'Time elapsed: {self.timeElapsed / 60:0.2f} minutes')
+ print(f'Time elapsed: {self.timeElapsed / 60:0.2f} minutes') # noqa: T201
- def startSectionTimer(self):
+ def startSectionTimer(self): # noqa: N802, D102
self.sectionStartingTime = time.time()
- def resetSectionTimer(self):
+ def resetSectionTimer(self): # noqa: N802, D102
self.startSectionTimer()
- def computeSectionTimeElapsed(self):
+ def computeSectionTimeElapsed(self): # noqa: N802, D102
self.sectionTimeElapsed = time.time() - self.sectionStartingTime
- def printSectionTimeElapsed(self):
+ def printSectionTimeElapsed(self): # noqa: N802, D102
self.computeSectionTimeElapsed()
- print(f'Time elapsed: {self.sectionTimeElapsed / 60:0.2f} minutes')
+ print(f'Time elapsed: {self.sectionTimeElapsed / 60:0.2f} minutes') # noqa: T201
@staticmethod
- def printEndMessages():
- print('Heirarchical Bayesian estimation done!')
+ def printEndMessages(): # noqa: N802, D102
+ print('Hierarchical Bayesian estimation done!') # noqa: T201
- def runUQ(
+ def runUQ( # noqa: N802
self,
- uqData,
- simulationData,
- randomVarsData,
- demandParams,
- workingDir,
- runType,
- localAppDir,
- remoteAppDir,
+ uqData, # noqa: N803
+ simulationData, # noqa: ARG002, N803
+ randomVarsData, # noqa: ARG002, N803
+ demandParams, # noqa: ARG002, N803
+ workingDir, # noqa: N803
+ runType, # noqa: ARG002, N803
+ localAppDir, # noqa: ARG002, N803
+ remoteAppDir, # noqa: ARG002, N803
):
"""This function configures and runs hierarchical Bayesian estimation based on the
input UQ configuration, simulation configuration, random variables,
@@ -181,7 +181,7 @@ def runUQ(
runType: Specifies whether computations are being run locally or on an HPC cluster
localAppDir: Directory containing apps for local run
remoteAppDir: Directory containing apps for remote run
- """
+ """ # noqa: D205, D400, D401, D404
self.startTimer()
self.storeUQData(uqData=uqData)
os.chdir(workingDir)
@@ -192,15 +192,15 @@ def runUQ(
self.printEndMessages()
-class testRunUQ:
+class testRunUQ: # noqa: D101
def __init__(self, json_file_path_string) -> None:
self.json_file_path_string = json_file_path_string
self.getUQData()
self.createRunner()
self.runTest()
- def getUQData(self):
- with open(os.path.abspath(self.json_file_path_string)) as f:
+ def getUQData(self): # noqa: N802, D102
+ with open(os.path.abspath(self.json_file_path_string)) as f: # noqa: PTH100, PTH123
input_data = json.load(f)
self.ApplicationData = input_data['Applications']
@@ -211,15 +211,15 @@ def getUQData(self):
self.localAppDir = input_data['localAppDir']
self.remoteAppDir = input_data['remoteAppDir']
self.workingDir = input_data['workingDir']
- self.workingDir = os.path.join(
+ self.workingDir = os.path.join( # noqa: PTH118
self.workingDir, 'tmp.SimCenter', 'templateDir'
)
self.runType = 'runningLocal'
- def createRunner(self):
+ def createRunner(self): # noqa: N802, D102
self.runner = HeirBayesRunner()
- def runTest(self):
+ def runTest(self): # noqa: N802, D102
self.runner.runUQ(
uqData=self.uqData,
simulationData=self.simulationData,
@@ -232,17 +232,17 @@ def runTest(self):
)
-def main():
- filename = os.path.abspath(
- os.path.join(
- os.path.dirname(__file__),
+def main(): # noqa: D103
+ filename = os.path.abspath( # noqa: PTH100
+ os.path.join( # noqa: PTH118
+ os.path.dirname(__file__), # noqa: PTH120
'test_CustomUQ/HeirBayesSyntheticData/templatedir/scInput.json',
)
)
- if os.path.exists(filename):
+ if os.path.exists(filename): # noqa: PTH110
testRunUQ(filename)
else:
- print(f'Test input json file {filename} not found. Not running the test.')
+ print(f'Test input json file {filename} not found. Not running the test.') # noqa: T201
if __name__ == '__main__':
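# A minimal sketch (not part of the diff) of the tab-separated layout that
# makeTabularResultsFile() above produces: a heading row built by
# createHeadingStringsList(), then one row per posterior sample. The column
# names and sample values here are illustrative assumptions.
import os

def write_tabular_results(heading_list, samples, save_file_name='tabularResults.out'):
    # Written one directory above the current working directory, mirroring
    # the os.path.dirname(os.getcwd()) logic in makeTabularResultsFile().
    save_path = os.path.join(os.path.dirname(os.getcwd()), save_file_name)
    with open(save_path, 'w') as f:
        f.write('\t'.join(heading_list) + '\n')
        for sample_num, sample in enumerate(samples):
            f.write('\t'.join([str(sample_num), '1', *map(str, sample)]) + '\n')

write_tabular_results(['%eval_id', 'interface', 'Mean_fy'], [[355.2], [361.7]])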
diff --git a/modules/performUQ/other/UQpyRunner.py b/modules/performUQ/other/UQpyRunner.py
index 1c913a333..1667cf7b0 100644
--- a/modules/performUQ/other/UQpyRunner.py
+++ b/modules/performUQ/other/UQpyRunner.py
@@ -1,4 +1,4 @@
-# written: Michael Gardner @ UNR
+# written: Michael Gardner @ UNR # noqa: INP001, D100
# updated Aakash Bangalore Satish, June 11 2024
import os
@@ -13,21 +13,21 @@
# THIS IS FOR WHEN MESSING AROUND WITH UQpy SOURCE
# import sys
# sys.path.append(os.path.abspath("/home/michael/UQpy/src"))
-from UQpy.sampling.MonteCarloSampling import MonteCarloSampling as MCS
+from UQpy.sampling.MonteCarloSampling import MonteCarloSampling as MCS # noqa: N817
from uqRunner import UqRunner
-class UQpyRunner(UqRunner):
- def runUQ(
+class UQpyRunner(UqRunner): # noqa: D101
+ def runUQ( # noqa: C901, N802
self,
- uqData,
- simulationData,
- randomVarsData,
- demandParams,
- workingDir,
- runType,
- localAppDir,
- remoteAppDir,
+ uqData, # noqa: N803
+ simulationData, # noqa: ARG002, N803
+ randomVarsData, # noqa: N803
+ demandParams, # noqa: N803
+ workingDir, # noqa: N803
+ runType, # noqa: N803
+ localAppDir, # noqa: N803
+ remoteAppDir, # noqa: ARG002, N803
):
"""This function configures and runs a UQ simulation using UQpy based on the
input UQ configuration, simulation configuration, random variables,
@@ -44,53 +44,53 @@ def runUQ(
runType: Specifies whether computations are being run locally or on an HPC cluster
localAppDir: Directory containing apps for local run
remoteAppDir: Directory containing apps for remote run
- """
+ """ # noqa: D205, D400, D401, D404
# There is still plenty of configuration that can and should be added here. This currently does MCS sampling with Uniform
# distributions only, though this is easily expanded
# Copy required python files to template directory
shutil.copyfile(
- os.path.join(
+ os.path.join( # noqa: PTH118
localAppDir, 'applications/performUQ/other/runWorkflowDriver.py'
),
- os.path.join(workingDir, 'runWorkflowDriver.py'),
+ os.path.join(workingDir, 'runWorkflowDriver.py'), # noqa: PTH118
)
shutil.copyfile(
- os.path.join(
+ os.path.join( # noqa: PTH118
localAppDir, 'applications/performUQ/other/createTemplate.py'
),
- os.path.join(workingDir, 'createTemplate.py'),
+ os.path.join(workingDir, 'createTemplate.py'), # noqa: PTH118
)
shutil.copyfile(
- os.path.join(
+ os.path.join( # noqa: PTH118
localAppDir, 'applications/performUQ/other/processUQpyOutput.py'
),
- os.path.join(workingDir, 'processUQpyOutput.py'),
+ os.path.join(workingDir, 'processUQpyOutput.py'), # noqa: PTH118
)
# Parse configuration for UQ
- distributionNames = []
- distributionParams = []
- variableNames = []
- distributionObjects = []
+ distributionNames = [] # noqa: N806
+ distributionParams = [] # noqa: N806
+ variableNames = [] # noqa: N806
+ distributionObjects = [] # noqa: N806
samples = []
- samplingMethod = ''
- numberOfSamples = 0
- modelScript = 'runWorkflowDriver.py'
- inputTemplate = 'params.template'
+ samplingMethod = '' # noqa: N806
+ numberOfSamples = 0 # noqa: N806
+ modelScript = 'runWorkflowDriver.py' # noqa: N806
+ inputTemplate = 'params.template' # noqa: N806
# outputObjectName = 'OutputProcessor'
- outputObjectName = 'output_function'
- outputScript = 'processUQpyOutput.py'
- numberOfTasks = 1
- numberOfNodes = 1
- coresPerTask = 1
- clusterRun = False
- resumeRun = False
+ outputObjectName = 'output_function' # noqa: N806
+ outputScript = 'processUQpyOutput.py' # noqa: N806
+ numberOfTasks = 1 # noqa: N806
+ numberOfNodes = 1 # noqa: N806
+ coresPerTask = 1 # noqa: N806
+ clusterRun = False # noqa: N806
+ resumeRun = False # noqa: N806, F841
seed = 1
# If computations are being executed on HPC, enable UQpy to start computations using srun
if runType == 'runningRemote':
- clusterRun = True
+ clusterRun = True # noqa: N806, F841
for val in randomVarsData:
if val['distribution'] == 'Uniform':
@@ -99,31 +99,31 @@ def runUQ(
distributionParams.append([val['lowerbound'], val['upperbound']])
else:
raise OSError(
- "ERROR: You'll need to update UQpyRunner.py to run your"
+ "ERROR: You'll need to update UQpyRunner.py to run your" # noqa: ISC003
+ ' specified RV distribution!'
)
for val in uqData['Parameters']:
if val['name'] == 'Sampling Method':
- samplingMethod = val['value']
+ samplingMethod = val['value'] # noqa: N806
if val['name'] == 'Number of Samples':
- numberOfSamples = int(val['value'])
+ numberOfSamples = int(val['value']) # noqa: N806
if val['name'] == 'Number of Concurrent Tasks':
- numberOfTasks = val['value']
+ numberOfTasks = val['value'] # noqa: N806
if val['name'] == 'Number of Nodes':
- numberOfNodes = val['value']
+ numberOfNodes = val['value'] # noqa: N806, F841
if val['name'] == 'Cores per Task':
- coresPerTask = val['value']
+ coresPerTask = val['value'] # noqa: N806, F841
if val['name'] == 'Seed':
seed = int(val['value'])
# Create distribution objects
- for index, val in enumerate(distributionNames, 0):
+ for index, val in enumerate(distributionNames, 0): # noqa: B007
distributionObjects.append(
Uniform(
distributionParams[index][0],
@@ -140,7 +140,7 @@ def runUQ(
)
else:
raise OSError(
- "ERROR: You'll need to update UQpyRunner.py to run your specified"
+ "ERROR: You'll need to update UQpyRunner.py to run your specified" # noqa: ISC003
+ ' sampling method!'
)
@@ -148,7 +148,7 @@ def runUQ(
os.chdir(workingDir)
# Run model based on input config
- startTime = time.time()
+ startTime = time.time() # noqa: N806
# model = RunModel(samples=samples.samples, model_script=modelScript,
# input_template=inputTemplate, var_names=variableNames,
# output_script=outputScript, output_object_name=outputObjectName,
@@ -165,17 +165,17 @@ def runUQ(
m = RunModel(ntasks=numberOfTasks, model=model)
m.run(samples.samples)
- runTime = time.time() - startTime
- print('\nTotal time for all experiments: ', runTime)
+ runTime = time.time() - startTime # noqa: N806
+ print('\nTotal time for all experiments: ', runTime) # noqa: T201
- with open(os.path.join(workingDir, '..', 'tabularResults.out'), 'w') as f:
+ with open(os.path.join(workingDir, '..', 'tabularResults.out'), 'w') as f: # noqa: PTH118, PTH123
f.write('%eval_id\t interface\t')
for val in variableNames:
- f.write('%s\t' % val)
+ f.write('%s\t' % val) # noqa: UP031
for val in demandParams:
- f.write('%s\t' % val['name'])
+ f.write('%s\t' % val['name']) # noqa: UP031
f.write('\n')
@@ -190,6 +190,6 @@ def runUQ(
f.write(string)
# Factory for creating UQpy runner
- class Factory:
- def create(self):
+ class Factory: # noqa: D106
+ def create(self): # noqa: D102
return UQpyRunner()
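# A short, self-contained sketch of the sampling step that runUQ() above wires
# up: one Uniform(loc, scale) object per random variable, then Monte Carlo
# sampling. The RV names and bounds are hypothetical; the imports match the
# UQpy 4.x API used in the diff.
from UQpy.distributions import Uniform
from UQpy.sampling.MonteCarloSampling import MonteCarloSampling as MCS

bounds = {'fy': (300.0, 400.0), 'E': (1.9e5, 2.1e5)}  # hypothetical lower/upper bounds
# UQpy's Uniform takes (loc, scale), hence the upper - lower conversion, as above.
dists = [Uniform(lo, hi - lo) for lo, hi in bounds.values()]
mcs = MCS(distributions=dists, nsamples=10, random_state=1)
print(mcs.samples.shape)  # (10, 2): one row per sample, one column per RV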
diff --git a/modules/performUQ/other/configureAndRunUQ.py b/modules/performUQ/other/configureAndRunUQ.py
index 735ac257c..4a5018c67 100644
--- a/modules/performUQ/other/configureAndRunUQ.py
+++ b/modules/performUQ/other/configureAndRunUQ.py
@@ -1,17 +1,17 @@
-# written: Michael Gardner @ UNR, Aakash Bangalore Satish @ UCB
+# written: Michael Gardner @ UNR, Aakash Bangalore Satish @ UCB # noqa: INP001, D100
# Use the UQpy driver as a starting point if you want to add other UQ capabilities
-def configureAndRunUQ(
- uqData,
- simulationData,
- randomVarsData,
- demandParams,
- workingDir,
- runType,
- localAppDir,
- remoteAppDir,
+def configureAndRunUQ( # noqa: N802
+ uqData, # noqa: N803
+ simulationData, # noqa: N803
+ randomVarsData, # noqa: N803
+ demandParams, # noqa: N803
+ workingDir, # noqa: N803
+ runType, # noqa: N803
+ localAppDir, # noqa: N803
+ remoteAppDir, # noqa: N803
):
"""This function configures and runs a UQ simulation based on the input
UQ driver and its associated inputs, simulation configuration, random
@@ -28,25 +28,25 @@ def configureAndRunUQ(
runType: Specifies whether computations are being run locally or on an HPC cluster
localAppDir: Directory containing apps for local run
remoteAppDir: Directory containing apps for remote run
- """
- uqDriverOptions = ['UQpy', 'HeirBayes']
+ """ # noqa: D205, D400, D401, D404
+ uqDriverOptions = ['UQpy', 'HeirBayes'] # noqa: N806
for val in uqData['Parameters']:
if val['name'] == 'UQ Driver':
- uqDriver = val['value']
+ uqDriver = val['value'] # noqa: N806
if uqDriver not in uqDriverOptions:
raise ValueError(
- 'ERROR: configureAndRunUQ.py: UQ driver not recognized.'
+ 'ERROR: configureAndRunUQ.py: UQ driver not recognized.' # noqa: ISC003
+ ' Either input incorrectly or class to run UQ driver not'
+ ' implemented: ',
uqDriver,
)
- else:
- if uqDriver == 'UQpy' or uqDriver == 'HeirBayes':
+ else: # noqa: RET506
+ if uqDriver == 'UQpy' or uqDriver == 'HeirBayes': # noqa: PLR1714
pass
- uqDriverClass = locals()[uqDriver + 'Runner']
+ uqDriverClass = locals()[uqDriver + 'Runner'] # noqa: N806
uqDriverClass().runUQ(
uqData,
simulationData,
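# configureAndRunUQ() above resolves the runner class from its name via
# locals()[uqDriver + 'Runner']; an explicit registry (a sketch of an
# alternative, not the module's code) makes the same dispatch auditable:
class UQpyRunnerStub:  # stand-ins for the imported runner classes
    def runUQ(self, *args, **kwargs):
        pass

class HeirBayesRunnerStub:
    def runUQ(self, *args, **kwargs):
        pass

RUNNERS = {'UQpy': UQpyRunnerStub, 'HeirBayes': HeirBayesRunnerStub}
uq_driver = 'UQpy'
if uq_driver not in RUNNERS:
    raise ValueError(f'UQ driver not recognized: {uq_driver}')
RUNNERS[uq_driver]().runUQ()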
diff --git a/modules/performUQ/other/createTemplate.py b/modules/performUQ/other/createTemplate.py
index 00be3c90e..83fdf3ad1 100644
--- a/modules/performUQ/other/createTemplate.py
+++ b/modules/performUQ/other/createTemplate.py
@@ -1,10 +1,10 @@
-from pathlib import Path
+from pathlib import Path # noqa: INP001, D100
-def createTemplate(variableNames, templateName):
- filePath = Path('./' + templateName)
+def createTemplate(variableNames, templateName): # noqa: N802, N803, D103
+ filePath = Path('./' + templateName) # noqa: N806
- with open(filePath, 'w') as f:
+ with open(filePath, 'w') as f: # noqa: PTH123
f.write(f'{len(variableNames)}\n')
for name in variableNames:
diff --git a/modules/performUQ/other/prepareUQ.py b/modules/performUQ/other/prepareUQ.py
index 42e0fd70c..ec381712a 100644
--- a/modules/performUQ/other/prepareUQ.py
+++ b/modules/performUQ/other/prepareUQ.py
@@ -1,12 +1,12 @@
-# written: Michael Gardner @ UNR
+# written: Michael Gardner @ UNR # noqa: INP001, D100
# import sys
-def prepareUQ(paramsFile, inputFile, outputFile, rvSpecifier):
+def prepareUQ(paramsFile, inputFile, outputFile, rvSpecifier): # noqa: C901, N802, N803, D103
# These are the delimiter choices, which can be expanded as more UQ programs are added. Remember to also
# extend the factory in rvDelimiter to handle additional cases
- rvDelimiterChoices = ['SimCenterDelimiter', 'UQpyDelimiter']
+ rvDelimiterChoices = ['SimCenterDelimiter', 'UQpyDelimiter'] # noqa: N806
if rvSpecifier not in rvDelimiterChoices:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -21,51 +21,51 @@ def prepareUQ(paramsFile, inputFile, outputFile, rvSpecifier):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Open parameters file and read parameter settings
- numRVs = 0
- lineCount = 0
- rvNames = []
- rvSettings = []
+ numRVs = 0 # noqa: N806
+ lineCount = 0 # noqa: N806
+ rvNames = [] # noqa: N806
+ rvSettings = [] # noqa: N806
try:
- with open(paramsFile) as params:
+ with open(paramsFile) as params: # noqa: PTH123
for line in params:
if lineCount == 0:
- rvNames = [i.strip() for i in line.split(',')]
- numRVs = len(rvNames)
+ rvNames = [i.strip() for i in line.split(',')] # noqa: N806
+ numRVs = len(rvNames) # noqa: N806, F841
# Replace RV names based on delimiter
for i, rv in enumerate(rvNames):
rvNames[i] = rvSpecifier.replaceRV(rv)
else:
- rvSettings = [i.strip() for i in line.split(',')]
+ rvSettings = [i.strip() for i in line.split(',')] # noqa: N806
- lineCount = lineCount + 1
+ lineCount = lineCount + 1 # noqa: N806
except OSError:
- print('ERROR: preProcessUQ.py could not open parameters file: ' + paramsFile)
+ print('ERROR: prepareUQ.py could not open parameters file: ' + paramsFile) # noqa: T201
# Next, open input file and search for random variables that need to be replaced by parameter realizations
- inputTemplate = 'inputTemplate'
- realizationOutput = 'outputFile'
+ inputTemplate = 'inputTemplate' # noqa: N806
+ realizationOutput = 'outputFile' # noqa: N806
try:
- inputTemplate = open(inputFile)
+ inputTemplate = open(inputFile) # noqa: SIM115, PTH123, N806
except OSError:
- print(
+ print( # noqa: T201
'ERROR: prepareUQ.py could not open input template file: ' + inputFile
)
try:
- realizationOutput = open(outputFile, 'w')
+ realizationOutput = open(outputFile, 'w') # noqa: SIM115, PTH123, N806
except OSError:
- print('ERROR: preProcessUQ.py could not open output file: ' + outputFile)
+ print('ERROR: prepareUQ.py could not open output file: ' + outputFile) # noqa: T201
# Iterate over all lines in input template
for line in inputTemplate:
# Iterate over all RVs to check they need to be replaced
for i, rv in enumerate(rvNames):
- try:
- line = line.replace(rv, rvSettings[i])
- except:
+ try: # noqa: SIM105
+ line = line.replace(rv, rvSettings[i]) # noqa: PLW2901
+ except: # noqa: S110, PERF203, E722
pass
realizationOutput.write(line)
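# A small sketch of the substitution loop prepareUQ() above performs: each RV
# token in a template line is replaced by the matching realization from the
# params file. The '<...>' delimiter and values are hypothetical; the real
# token format comes from the rvSpecifier delimiter classes.
rv_names = ['<fy>', '<E>']
rv_settings = ['355.0', '2.0e5']
template_lines = ['fy = <fy>\n', 'E = <E>\n']
with open('realization.in', 'w') as out:
    for line in template_lines:
        for name, value in zip(rv_names, rv_settings):
            line = line.replace(name, value)
        out.write(line)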
diff --git a/modules/performUQ/other/processUQpyOutput.py b/modules/performUQ/other/processUQpyOutput.py
index 8fb5e9690..cf68c7c3f 100644
--- a/modules/performUQ/other/processUQpyOutput.py
+++ b/modules/performUQ/other/processUQpyOutput.py
@@ -1,8 +1,8 @@
-from pathlib import Path
+from pathlib import Path # noqa: INP001, D100
import numpy as np
-def output_function(index):
- filePath = Path('./results.out').resolve()
+def output_function(index): # noqa: ARG001, D103
+ filePath = Path('./results.out').resolve() # noqa: N806
return np.atleast_2d(np.genfromtxt(filePath))
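# np.atleast_2d above guarantees output_function() returns a 2-D array even
# when results.out holds a single scalar; a quick check of that shape contract
# (assuming a one-value results file):
import numpy as np

np.savetxt('results.out', np.array([1.5]))
out = np.atleast_2d(np.genfromtxt('results.out'))
assert out.shape == (1, 1)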
diff --git a/modules/performUQ/other/runOtherUQ.py b/modules/performUQ/other/runOtherUQ.py
index 84806b957..4057b847a 100644
--- a/modules/performUQ/other/runOtherUQ.py
+++ b/modules/performUQ/other/runOtherUQ.py
@@ -1,4 +1,4 @@
-# written: Michael Gardner @ UNR
+# written: Michael Gardner @ UNR # noqa: INP001, D100
import argparse
import json
@@ -7,7 +7,7 @@
from configureAndRunUQ import configureAndRunUQ
-def main():
+def main(): # noqa: D103
# KEEP THIS FOR NOW--MAYBE BACKEND WILL BE UPDATED TO ACCEPT DIFFERENT ARGUMENTS...
# parser = argparse.ArgumentParser(description='Generate workflow driver based on input configuration')
# parser.add_argument('--mainWorkDir', '-m', required=True, help="Main work directory")
@@ -15,7 +15,7 @@ def main():
# parser.add_argument('--runType', '-r', required=True, help="Type of run")
# parser.add_argument('--inputFile', '-i', required=True, help="Input JSON file with configuration from UI")
# Options for run type
- runTypeOptions = ['runningLocal', 'runningRemote']
+ runTypeOptions = ['runningLocal', 'runningRemote'] # noqa: N806
# args = parser.parse_args()
@@ -33,35 +33,35 @@ def main():
args, unknowns = parser.parse_known_args()
- inputFile = args.workflowInput
- runType = args.runType
- workflowDriver = args.driverFile
- outputFile = args.workflowOutput
+ inputFile = args.workflowInput # noqa: N806
+ runType = args.runType # noqa: N806
+ workflowDriver = args.driverFile # noqa: N806, F841
+ outputFile = args.workflowOutput # noqa: N806, F841
- cwd = os.getcwd()
- workDirTemp = cwd
+ cwd = os.getcwd() # noqa: PTH109
+ workDirTemp = cwd # noqa: N806
if runType not in runTypeOptions:
- raise ValueError('ERROR: Input run type has to be either local or remote')
+ raise ValueError('ERROR: Input run type has to be either local or remote') # noqa: EM101, TRY003
# change workdir to the templatedir
# os.chdir(workDirTemp)
# cwd = os.getcwd()
# Open input file
- inputdata = {}
- with open(inputFile) as data_file:
- inputData = json.load(data_file)
+ inputdata = {} # noqa: F841
+ with open(inputFile) as data_file: # noqa: PTH123
+ inputData = json.load(data_file) # noqa: N806
- applicationsData = inputData['Applications']
+ applicationsData = inputData['Applications'] # noqa: N806
# Get data to pass to UQ driver
- uqData = inputData['UQ']
- simulationData = applicationsData['FEM']
- randomVarsData = inputData['randomVariables']
- demandParams = inputData['EDP']
- localAppDir = inputData['localAppDir']
- remoteAppDir = inputData['remoteAppDir']
+ uqData = inputData['UQ'] # noqa: N806
+ simulationData = applicationsData['FEM'] # noqa: N806
+ randomVarsData = inputData['randomVariables'] # noqa: N806
+ demandParams = inputData['EDP'] # noqa: N806
+ localAppDir = inputData['localAppDir'] # noqa: N806
+ remoteAppDir = inputData['remoteAppDir'] # noqa: N806
# Run UQ based on data and selected UQ engine--if you need to preprocess files with custom delimiters, use preprocessUQ.py
configureAndRunUQ(
diff --git a/modules/performUQ/other/runWorkflowDriver.py b/modules/performUQ/other/runWorkflowDriver.py
index 259b999e8..76db661ab 100644
--- a/modules/performUQ/other/runWorkflowDriver.py
+++ b/modules/performUQ/other/runWorkflowDriver.py
@@ -1,30 +1,30 @@
-import os
+import os # noqa: INP001, D100
import shutil
from sys import platform
import fire
-def runWorkflow(index):
+def runWorkflow(index): # noqa: N802, D103
index = int(index)
shutil.copy(
- os.path.join(
- os.getcwd(),
+ os.path.join( # noqa: PTH118
+ os.getcwd(), # noqa: PTH109
'InputFiles',
'params_' + str(index) + '.template',
),
- os.path.join(os.getcwd(), 'params.in'),
+ os.path.join(os.getcwd(), 'params.in'), # noqa: PTH109, PTH118
)
command2 = 'blank'
if platform == 'linux' or platform == 'linux2' or platform == 'darwin':
- command2 = os.path.join(os.getcwd(), 'driver')
+ command2 = os.path.join(os.getcwd(), 'driver') # noqa: PTH109, PTH118
elif platform == 'win32':
- command2 = os.path.join(os.getcwd(), 'driver.bat')
+ command2 = os.path.join(os.getcwd(), 'driver.bat') # noqa: PTH109, PTH118
# os.system(command1)
- os.system(command2)
+ os.system(command2) # noqa: S605
if __name__ == '__main__':
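# The PTH109/PTH118 codes suppressed above are Ruff's pathlib suggestions; a
# sketch of the same template-copy step written with pathlib instead of
# os.path (behaviorally equivalent, assuming the same InputFiles layout):
import shutil
from pathlib import Path

def run_workflow(index: int) -> None:
    cwd = Path.cwd()
    shutil.copy(cwd / 'InputFiles' / f'params_{index}.template', cwd / 'params.in')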
diff --git a/modules/performUQ/other/uqRunner.py b/modules/performUQ/other/uqRunner.py
index 91be4f5b0..60cffe147 100644
--- a/modules/performUQ/other/uqRunner.py
+++ b/modules/performUQ/other/uqRunner.py
@@ -1,20 +1,20 @@
-# written: Michael Gardner
+# written: Michael Gardner # noqa: INP001, D100
# DO NOT CHANGE THE FACTORY, JUST IMPORT IT INTO ADDITIONAL DERIVED CLASSES
# Polymorphic factory for running UQ apps
-class UqRunnerFactory:
- factories = {}
+class UqRunnerFactory: # noqa: D101
+ factories = {} # noqa: RUF012
- def addFactory(id, runnerFactory):
+ def addFactory(id, runnerFactory): # noqa: A002, N802, N803, N805, D102
UqRunnerFactory.factories.put[id] = runnerFactory
# A Template Method:
- def createRunner(id):
+ def createRunner(id): # noqa: A002, N802, N805, D102
if id not in UqRunnerFactory.factories:
- UqRunnerFactory.factories[id] = eval(id + '.Factory()')
+ UqRunnerFactory.factories[id] = eval(id + '.Factory()') # noqa: S307
return UqRunnerFactory.factories[id].create()
# Abstract base class
-class UqRunner:
+class UqRunner: # noqa: D101
pass
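# createRunner() above instantiates factories with eval(id + '.Factory()'),
# hence the S307 suppression; a dict-based registry sketch (an alternative,
# not the module's API) gives the same polymorphic creation without eval:
class UqRunnerBase:
    pass

class DemoRunner(UqRunnerBase):  # hypothetical concrete runner
    class Factory:
        def create(self):
            return DemoRunner()

FACTORIES = {'DemoRunner': DemoRunner.Factory()}
runner = FACTORIES['DemoRunner'].create()
assert isinstance(runner, DemoRunner)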
diff --git a/modules/performanceAssessment/REDi/REDiWrapper.py b/modules/performanceAssessment/REDi/REDiWrapper.py
index 52b6da290..f8b7bb135 100644
--- a/modules/performanceAssessment/REDi/REDiWrapper.py
+++ b/modules/performanceAssessment/REDi/REDiWrapper.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2019 The Regents of the University of California
# Copyright (c) 2019 Leland Stanford Junior University
#
@@ -51,23 +51,23 @@
import pandas as pd
from REDi.go_redi import go_redi
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
main_dir = this_dir.parents[1]
sys.path.insert(0, str(main_dir / 'common'))
-from simcenter_common import get_scale_factors
+from simcenter_common import get_scale_factors # noqa: E402
-class NumpyEncoder(json.JSONEncoder):
+class NumpyEncoder(json.JSONEncoder): # noqa: D101
# Encode the numpy datatypes to json
- def default(self, obj):
+ def default(self, obj): # noqa: D102
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
-def get_stats(arr: np.array) -> dict:
+def get_stats(arr: np.array) -> dict: # noqa: D103
# Returns a dictionary of summary stats from the array
if np.min(arr) > 0.0:
@@ -94,7 +94,7 @@ def get_stats(arr: np.array) -> dict:
}
-def clean_up_results(res: dict, keys_to_remove: List[str]) -> dict:
+def clean_up_results(res: dict, keys_to_remove: List[str]) -> dict: # noqa: FA100, D103
# Remove extra keys not needed here
for key in keys_to_remove:
@@ -104,7 +104,7 @@ def clean_up_results(res: dict, keys_to_remove: List[str]) -> dict:
return res
-def clean_up_nistr(nistr: str) -> str:
+def clean_up_nistr(nistr: str) -> str: # noqa: D103
# helper function to convert from Pelicun tag to REDi tag
indices_to_remove = [1, 4]
@@ -115,7 +115,7 @@ def clean_up_nistr(nistr: str) -> str:
return nistr
-def get_replacement_response(replacement_time: float):
+def get_replacement_response(replacement_time: float): # noqa: D103
return {
'repair_class': 'replacement',
'damage_by_component_all_DS': None,
@@ -132,19 +132,19 @@ def get_replacement_response(replacement_time: float):
}
-def get_first_value(val: dict, num_levels: int) -> int:
+def get_first_value(val: dict, num_levels: int) -> int: # noqa: D103
# Get the number of samples that pelicun returns
next_val = next(iter(val.items()))[1]
if num_levels > 0:
return get_first_value(val=next_val, num_levels=num_levels - 1)
- else:
+ else: # noqa: RET505
return next_val
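# get_first_value() above recurses num_levels deep into nested Pelicun output
# to reach the first leaf; a standalone copy with a hypothetical two-level
# {floor: {direction: samples}} dict showing the traversal:
def get_first_value(val, num_levels):
    next_val = next(iter(val.items()))[1]
    if num_levels > 0:
        return get_first_value(val=next_val, num_levels=num_levels - 1)
    return next_val

assert get_first_value({'4': {'1': [10.0, 11.0]}}, num_levels=1) == [10.0, 11.0]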
-def main(args):
- print('***Running REDi Seismic Downtime engine***\n')
+def main(args): # noqa: C901, D103, PLR0915
+ print('***Running REDi Seismic Downtime engine***\n') # noqa: T201
pelicun_results_dir = Path(args.dirnameOutput)
@@ -160,49 +160,49 @@ def main(args):
redi_output_dir.mkdir(parents=True)
# dictionary to hold the base input parameters that do not change with every pelicun iteration
- rediInputDict = dict()
+ rediInputDict = dict() # noqa: C408, N806
# load the risk parameters
- pathRiskParams = Path(args.riskParametersPath)
- with open(pathRiskParams, encoding='utf-8') as f:
+ pathRiskParams = Path(args.riskParametersPath) # noqa: N806
+ with open(pathRiskParams, encoding='utf-8') as f: # noqa: PTH123
risk_param_dict = json.load(f)
rediInputDict['risk_parameters'] = risk_param_dict
# import SimCenter's AIM.json file
- pathAim = pelicun_results_dir / 'AIM.json'
- with open(pathAim, encoding='utf-8') as f:
- AIM = json.load(f)
+ pathAim = pelicun_results_dir / 'AIM.json' # noqa: N806
+ with open(pathAim, encoding='utf-8') as f: # noqa: PTH123
+ AIM = json.load(f) # noqa: N806
# Get the CMP_sample json from Pelicun
- pathComponent = pelicun_results_dir / 'CMP_sample.json'
- with open(pathComponent, encoding='utf-8') as f:
- CMP = json.load(f)
+ pathComponent = pelicun_results_dir / 'CMP_sample.json' # noqa: N806
+ with open(pathComponent, encoding='utf-8') as f: # noqa: PTH123
+ CMP = json.load(f) # noqa: N806
# remove Units information - for now
if 'Units' in CMP:
del CMP['Units']
# Get the DMG_sample json from Pelicun
- pathComponentDmg = pelicun_results_dir / 'DMG_sample.json'
- with open(pathComponentDmg, encoding='utf-8') as f:
- CMP_DMG = json.load(f)
+ pathComponentDmg = pelicun_results_dir / 'DMG_sample.json' # noqa: N806
+ with open(pathComponentDmg, encoding='utf-8') as f: # noqa: PTH123
+ CMP_DMG = json.load(f) # noqa: N806
# remove Units information - for now
if 'Units' in CMP_DMG:
del CMP_DMG['Units']
# Get the DV_repair_sample json from Pelicun
- pathComponentDV = pelicun_results_dir / 'DV_repair_sample.json'
- with open(pathComponentDV, encoding='utf-8') as f:
- CMP_DV = json.load(f)
+ pathComponentDV = pelicun_results_dir / 'DV_repair_sample.json' # noqa: N806
+ with open(pathComponentDV, encoding='utf-8') as f: # noqa: PTH123
+ CMP_DV = json.load(f) # noqa: N806
# remove Units information - for now
if 'Units' in CMP_DV:
del CMP_DV['Units']
# Load the csv version of the decision vars
- with zipfile.ZipFile(
+ with zipfile.ZipFile( # noqa: SIM117
pelicun_results_dir / 'DV_repair_sample.zip', 'r'
) as zip_ref:
# Read the CSV file inside the zip file into memory
@@ -219,10 +219,10 @@ def main(args):
# Define a list of keywords to search for in column names
keywords = ['replacement-collapse', 'replacement-irreparable']
- DVs = ['Cost', 'Time']
+ DVs = ['Cost', 'Time'] # noqa: N806
- DVReplacementDict = {}
- for DV in DVs:
+ DVReplacementDict = {} # noqa: N806
+ for DV in DVs: # noqa: N806
columns_to_check = [
col
for col in data.columns
@@ -234,18 +234,18 @@ def main(args):
DVReplacementDict[DV] = result_vector
# Find columns containing replace or collapse keywords
- buildingirreparableOrCollapsed = (data[columns_to_check] != 0).any(axis=1)
+ buildingirreparableOrCollapsed = (data[columns_to_check] != 0).any(axis=1) # noqa: N806
sum_collapsed_buildings = sum(buildingirreparableOrCollapsed)
- print(
+ print( # noqa: T201
f'There are {sum_collapsed_buildings} collapsed or irreparable buildings from Pelicun'
)
# Get some general information
gen_info = AIM['DL']['Asset']
- nStories = int(gen_info['NumberOfStories'])
+ nStories = int(gen_info['NumberOfStories']) # noqa: N806
rediInputDict['nFloor'] = nStories
# Get the plan area
@@ -272,23 +272,23 @@ def main(args):
num_workers = max(int(total_building_area / 1000), 1)
# Get the replacement cost and time
- DL_info = AIM['DL']['Losses']['Repair']
+ DL_info = AIM['DL']['Losses']['Repair'] # noqa: N806
# Note these are not the random
- replacementCost = DL_info['ReplacementCost']['Median']
+ replacementCost = DL_info['ReplacementCost']['Median'] # noqa: N806
rediInputDict['replacement_cost'] = (
float(replacementCost) / 1e6
) # Needs to be in the millions of dollars
- replacementTime = float(DL_info['ReplacementTime']['Median'])
+ replacementTime = float(DL_info['ReplacementTime']['Median']) # noqa: N806
# convert replacement time to days from worker_days
- replacementTime = replacementTime / num_workers
+ replacementTime = replacementTime / num_workers # noqa: N806
rediInputDict['replacement_time'] = replacementTime
- final_results_dict = dict()
- log_output: List[str] = []
+ final_results_dict = dict() # noqa: C408
+ log_output: List[str] = [] # noqa: FA100
for sample in range(num_samples):
if buildingirreparableOrCollapsed[sample]:
@@ -310,20 +310,20 @@ def main(args):
# ...,
# {'NISTR' : nistr_id_n,
# 'Qty' : [dir_1, dir_2]}]
- components: List[List[Dict[str, Any]]] = [[] for i in range(nStories + 1)]
+ components: List[List[Dict[str, Any]]] = [[] for i in range(nStories + 1)] # noqa: FA100
# Pelicun output map ###
# "B1033.061b": { <- component nistr
# "4": { <- floor
# "1": [ <- direction
- CMP = clean_up_results(
+ CMP = clean_up_results( # noqa: N806
res=CMP, keys_to_remove=['collapse', 'excessiveRID', 'irreparable']
)
for nistr, floors in CMP.items():
- nistr = clean_up_nistr(nistr=nistr)
+ nistr = clean_up_nistr(nistr=nistr) # noqa: PLW2901
for floor, dirs in floors.items():
- floor = int(floor)
+ floor = int(floor) # noqa: PLW2901
dir_1 = 0.0
dir_2 = 0.0
@@ -342,8 +342,8 @@ def main(args):
dir_2 = float(dirs['2'][sample])
else:
- raise ValueError(
- 'Could not parse the directionality in the Pelicun output.'
+ raise ValueError( # noqa: TRY003
+ 'Could not parse the directionality in the Pelicun output.' # noqa: EM101
)
cmp_dict = {'NISTR': nistr, 'Qty': [dir_1, dir_2]}
@@ -356,19 +356,19 @@ def main(args):
# The highest level, outer list is associated with the number of damage states while the inner list corresponds to the number of floors
# [ds_1, ds_2, ..., ds_n]
# where ds_n = [num_dmg_units_floor_1, num_dmg_units_floor_2, ..., num_dmg_units_floor_n]
- component_damage: Dict[str, List[List[float]]] = {}
+ component_damage: Dict[str, List[List[float]]] = {} # noqa: FA100
# Pelicun output map ###
# "B1033.061b": { <- component nistr
# "4": { <- floor
# "1": { <- direction
# "0": [ <- damage state -> Note that zero.. means undamaged
- CMP_DMG = clean_up_results(
+ CMP_DMG = clean_up_results( # noqa: N806
res=CMP_DMG, keys_to_remove=['collapse', 'excessiveRID', 'irreparable']
)
collapse_flag = False
for nistr, floors in CMP_DMG.items():
- nistr = clean_up_nistr(nistr=nistr)
+ nistr = clean_up_nistr(nistr=nistr) # noqa: PLW2901
# Get the number of damage states
num_ds = len(get_first_value(val=floors, num_levels=1))
@@ -377,11 +377,11 @@ def main(args):
ds_qtys = [floor_qtys for i in range(num_ds)]
for floor, dirs in floors.items():
- floor = int(floor)
+ floor = int(floor) # noqa: PLW2901
- for dir, dir_qty in dirs.items():
+ for dir, dir_qty in dirs.items(): # noqa: B007, A001
for ds, qtys in dir_qty.items():
- ds = int(ds)
+ ds = int(ds) # noqa: PLW2901
qty = float(qtys[sample])
if math.isnan(qty):
@@ -423,7 +423,7 @@ def main(args):
# The second level list contains the number of stories, so a list with length 5 will be a 4-story building with a roof.
# The third-level list is based on the number of damage states (not including Damage State 0).
- total_consequences: Dict[str, List[List[float]]] = {}
+ total_consequences: Dict[str, List[List[float]]] = {} # noqa: FA100
# Pelicun output map ###
# "COST": { <- cost/time key
@@ -432,7 +432,7 @@ def main(args):
# "1": { <- damage state
# "4": { <- floor
# "1": [ <- direction
- for nistr in cost_dict.keys():
+ for nistr in cost_dict.keys(): # noqa: SIM118
# Handle the case of the nested nistr which will be the same for FEMA P-58
cost_res = cost_dict[nistr][nistr]
time_res = time_dict[nistr][nistr]
@@ -445,28 +445,28 @@ def main(args):
cost_floor_list = floor_list.copy()
time_floor_list = floor_list.copy()
- for ds in cost_res.keys():
+ for ds in cost_res.keys(): # noqa: SIM118
cost_floor_dict = cost_res[ds]
time_floor_dict = time_res[ds]
- ds = int(ds)
+ ds = int(ds) # noqa: PLW2901
- for floor in cost_floor_dict.keys():
+ for floor in cost_floor_dict.keys(): # noqa: SIM118
cost_dirs_dict = cost_floor_dict[floor]
time_dirs_dict = time_floor_dict[floor]
- floor = int(floor)
+ floor = int(floor) # noqa: PLW2901
total_cost = 0.0
total_time = 0.0
- for dir in cost_dirs_dict.keys():
+ for dir in cost_dirs_dict.keys(): # noqa: A001, SIM118
total_cost += float(cost_dirs_dict[dir][sample])
total_time += float(time_dirs_dict[dir][sample])
cost_floor_list[floor - 1][ds - 1] = total_cost
time_floor_list[floor - 1][ds - 1] = total_time
- nistr = clean_up_nistr(nistr=nistr)
+ nistr = clean_up_nistr(nistr=nistr) # noqa: PLW2901
# Last two items are empty because pelicun does not return injuries and fatalities.
total_consequences[nistr] = [
@@ -486,7 +486,7 @@ def main(args):
rediInputDict['_id'] = f'SimCenter_{sample}'
# Save the dictionary to a JSON file
- with open(
+ with open( # noqa: PTH123
redi_input_dir / f'redi_{sample}.json', 'w', encoding='utf-8'
) as f:
json.dump(this_it_input, f, indent=4, cls=NumpyEncoder)
@@ -511,8 +511,8 @@ def main(args):
final_results_dict[sample] = res
# Create a high-level json with detailed results
- print(f'Saving all samples to: {redi_output_dir}/redi_results_all_samples.json')
- with open(
+ print(f'Saving all samples to: {redi_output_dir}/redi_results_all_samples.json') # noqa: T201
+ with open( # noqa: PTH123
redi_output_dir / 'redi_results_all_samples.json', 'w', encoding='utf-8'
) as f:
json.dump(final_results_dict, f, cls=NumpyEncoder)
@@ -520,7 +520,7 @@ def main(args):
# Create a smaller summary stats json for recovery time and max delay
dt_all_samples = [[] for i in range(3)]
max_delay_list = []
- for sample, res in final_results_dict.items():
+ for sample, res in final_results_dict.items(): # noqa: B007
total_downtime = res['building_total_downtime']
# full recovery - functional recovery - immediate occupancy
for i in range(3):
@@ -540,15 +540,15 @@ def main(args):
'Immediate Occupancy': get_stats(immediate_occupancy_list),
}
- print(f'Saving all samples to: {redi_output_dir}/redi_summary_stats.json')
- with open(
+ print(f'Saving all samples to: {redi_output_dir}/redi_summary_stats.json') # noqa: T201
+ with open( # noqa: PTH123
redi_output_dir / 'redi_summary_stats.json', 'w', encoding='utf-8'
) as f:
json.dump(summary_stats, f, indent=4, cls=NumpyEncoder)
# Write the log file
- print(f'Saving REDi log file at: {redi_output_dir}/redi_log.txt')
- with open(redi_output_dir / 'redi_log.txt', 'w', encoding='utf-8') as file:
+ print(f'Saving REDi log file at: {redi_output_dir}/redi_log.txt') # noqa: T201
+ with open(redi_output_dir / 'redi_log.txt', 'w', encoding='utf-8') as file: # noqa: PTH123
# Iterate through the list of strings and write each one to the file
for string in log_output:
file.write(string + '\n')
@@ -577,24 +577,24 @@ def main(args):
# Check for the required arguments
if not args.dirnameOutput:
- print(
+ print( # noqa: T201
'Path to the working directory containing the Pelicun results is required'
)
- exit()
+ exit() # noqa: PLR1722
elif not Path(args.dirnameOutput).exists():
- print(
+ print( # noqa: T201
f'Provided path to the working directory {args.dirnameOutput} does not exist'
)
- exit()
+ exit() # noqa: PLR1722
if not args.riskParametersPath:
- print('Path to the risk parameters JSON file is required')
- exit()
+ print('Path to the risk parameters JSON file is required') # noqa: T201
+ exit() # noqa: PLR1722
elif not Path(args.riskParametersPath).exists():
- print(
+ print( # noqa: T201
f'Provided path to the risk parameters JSON file {args.riskParametersPath} does not exist'
)
- exit()
+ exit() # noqa: PLR1722
start_time = time.time()
@@ -602,7 +602,7 @@ def main(args):
end_time = time.time()
elapsed_time = end_time - start_time
- print(f'REDi finished. Elapsed time: {elapsed_time:.2f} seconds')
+ print(f'REDi finished. Elapsed time: {elapsed_time:.2f} seconds') # noqa: T201
'/opt/homebrew/anaconda3/envs/simcenter/bin/python' '/Users/stevan.gavrilovic/Desktop/SimCenter/SimCenterBackendApplications/applications/performanceAssessment/REDi/REDiWrapper.py' '--riskParametersPath' '/Users/stevan.gavrilovic/Desktop/SimCenter/build-PBE-Qt_6_5_1_for_macOS-Debug/PBE.app/Contents/MacOS/Examples/pbdl-0003/src/risk_params.json' '--dirnameOutput' '/Users/stevan.gavrilovic/Documents/PBE/LocalWorkDir/tmp.SimCenter'
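# NumpyEncoder above is what lets the json.dump(..., cls=NumpyEncoder) calls in
# main() serialize numpy arrays; a minimal self-contained demonstration of the
# default() hook:
import json
import numpy as np

class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()  # arrays become plain lists
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'downtime': np.array([1.0, 2.0])}, cls=NumpyEncoder))
# {"downtime": [1.0, 2.0]}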
diff --git a/modules/systemPerformance/REWET/REWET/Damage.py b/modules/systemPerformance/REWET/REWET/Damage.py
index 57a216ae5..6d6de2112 100644
--- a/modules/systemPerformance/REWET/REWET/Damage.py
+++ b/modules/systemPerformance/REWET/REWET/Damage.py
@@ -2,7 +2,7 @@
This module is responsible for calculating damage to different components of
the system, including pipelines, pumps, and so on.
@author: snaeimi
-"""
+""" # noqa: N999, D205, D400
import logging
import math
@@ -18,7 +18,7 @@
logger = logging.getLogger(__name__)
-class EarthquakeScenario:
+class EarthquakeScenario: # noqa: D101
def __init__(self, magnitude, depth, x_coord, y_coord, eq_time):
self.M = abs(magnitude)
self.depth = abs(depth)
@@ -27,13 +27,13 @@ def __init__(self, magnitude, depth, x_coord, y_coord, eq_time):
self.coordinate['Y'] = y_coord
self.time = abs(eq_time)
- def getWNTREarthquakeObject(self):
+ def getWNTREarthquakeObject(self): # noqa: N802, D102
return wntrfr.scenario.Earthquake(
(self.coordinate['X'], self.coordinate['Y']), self.M, self.depth
)
-class Damage:
+class Damage: # noqa: D101
def __init__(self, registry, scenario_set):
self.scenario_set = scenario_set
self.pipe_leak = pd.Series(dtype='O')
@@ -57,7 +57,7 @@ def __init__(self, registry, scenario_set):
# self._nodal_damage_method = None
self._pipe_damage_method = 1
- def readDamageFromPickleFile(
+ def readDamageFromPickleFile( # noqa: N802
self,
pickle_file_name,
csv_file_name,
@@ -75,9 +75,9 @@ def readDamageFromPickleFile(
Returns
-------
- """
- with open(pickle_file_name, 'rb') as pckf:
- w = pickle.load(pckf)
+ """ # noqa: D205, D400, D401, D404, D414
+ with open(pickle_file_name, 'rb') as pckf: # noqa: PTH123
+ w = pickle.load(pckf) # noqa: S301
name_list = pd.read_csv(csv_file_name, index_col=csv_index)
damage_name_list = []
@@ -107,13 +107,13 @@ def readDamageFromPickleFile(
# print(name_list)
- def readPumpDamage(self, file_name):
+ def readPumpDamage(self, file_name): # noqa: N802, D102
pump_list = pd.read_csv(file_name)
self.damaged_pumps = pump_list['Pump_ID']
- def readNodalDamage(self, file_address):
+ def readNodalDamage(self, file_address): # noqa: N802, D102
temp = pd.read_csv(file_address)
- for ind, val in temp.iterrows():
+ for ind, val in temp.iterrows(): # noqa: B007
temp_data = {}
temp_data['node_name'] = str(val['NodeID'])
temp_data['node_RR'] = val['RR']
@@ -130,43 +130,43 @@ def readNodalDamage(self, file_address):
self.node_damage = self.node_damage.append(pd.Series(data=[temp_data]))
- self.node_damage.reset_index(drop=True, inplace=True)
+ self.node_damage.reset_index(drop=True, inplace=True) # noqa: PD002
- def setNodalDamageModelParameter(self, damage_param):
+ def setNodalDamageModelParameter(self, damage_param): # noqa: N802, D102
self._registry.nodal_equavalant_diameter = damage_param
- def readDamageGiraffeFormat(self, break_file_name, leak_file_name):
+ def readDamageGiraffeFormat(self, break_file_name, leak_file_name): # noqa: N802, D102
break_temp = pd.read_csv(break_file_name)
leak_temp = pd.read_csv(leak_file_name)
- temp_break_pipe_ID = break_temp['PipeID']
- temp_leak_pipe_ID = leak_temp['PipeID']
+ temp_break_pipe_ID = break_temp['PipeID'] # noqa: N806
+ temp_leak_pipe_ID = leak_temp['PipeID'] # noqa: N806
if temp_break_pipe_ID.dtype != 'O':
- temp_break_pipe_ID = temp_break_pipe_ID.apply(lambda x: str(x))
+ temp_break_pipe_ID = temp_break_pipe_ID.apply(lambda x: str(x)) # noqa: N806
break_temp['PipeID'] = temp_break_pipe_ID
if temp_leak_pipe_ID.dtype != 'O':
- temp_leak_pipe_ID = temp_leak_pipe_ID.apply(lambda x: str(x))
+ temp_leak_pipe_ID = temp_leak_pipe_ID.apply(lambda x: str(x)) # noqa: N806
leak_temp['PipeID'] = temp_leak_pipe_ID
temp1 = break_temp[['PipeID', 'BreakRatio']]
- temp1._is_copy = None
+ temp1._is_copy = None # noqa: SLF001
temp1['damage'] = 'break'
- temp1.rename(columns={'BreakRatio': 'ratio'}, inplace=True)
+ temp1.rename(columns={'BreakRatio': 'ratio'}, inplace=True) # noqa: PD002
temp2 = leak_temp[['PipeID', 'LeakRatio']]
- temp2._is_copy = None
- temp2.rename(columns={'LeakRatio': 'ratio'}, inplace=True)
+ temp2._is_copy = None # noqa: SLF001
+ temp2.rename(columns={'LeakRatio': 'ratio'}, inplace=True) # noqa: PD002
temp2['damage'] = 'leak'
temp = pd.concat([temp1, temp2])
temp = temp.sort_values(['PipeID', 'ratio'], ascending=(True, False))
- unique_pipe_ID = temp['PipeID'].unique().tolist()
+ unique_pipe_ID = temp['PipeID'].unique().tolist() # noqa: N806
- for pipe_ID in unique_pipe_ID:
+ for pipe_ID in unique_pipe_ID: # noqa: N806
selected_damage = temp[temp['PipeID'] == pipe_ID]
if 'break' in selected_damage['damage'].tolist():
@@ -185,10 +185,10 @@ def readDamageGiraffeFormat(self, break_file_name, leak_file_name):
else:
number = len(selected_damage)
- temp_leak_D = pd.Series(data=selected_damage.index)
- temp_leak_D = temp_leak_D.apply(lambda x: leak_temp.loc[x, 'LeakD'])
+ temp_leak_D = pd.Series(data=selected_damage.index) # noqa: N806
+ temp_leak_D = temp_leak_D.apply(lambda x: leak_temp.loc[x, 'LeakD']) # noqa: N806
- leak_D = ((temp_leak_D**2).sum()) ** 0.5
+ leak_D = ((temp_leak_D**2).sum()) ** 0.5 # noqa: N806
tmp_leak = {
'pipe_id': pipe_ID,
'leak_loc': 0.5,
@@ -203,10 +203,10 @@ def readDamageGiraffeFormat(self, break_file_name, leak_file_name):
)
)
- def addPipeDamageByDamageList(self, damage_list, leak_type_ref, break_type_ref):
+ def addPipeDamageByDamageList(self, damage_list, leak_type_ref, break_type_ref): # noqa: ARG002, N802, D102
# leaked_damage = damage_list[damage_list['damage_state']==leak_type_ref]
- for ind, row in damage_list.iterrows():
+ for ind, row in damage_list.iterrows(): # noqa: B007
if row['damage_state'] == 0: # break
tmp_break = {
'pipe_id': row['name'],
@@ -231,9 +231,9 @@ def addPipeDamageByDamageList(self, damage_list, leak_type_ref, break_type_ref):
)
)
else:
- raise ValueError('There is an unknown damage type')
+ raise ValueError('There is an unknown damage type') # noqa: EM101, TRY003
- def readDamageFromTextFile(self, path):
+ def readDamageFromTextFile(self, path): # noqa: N802
"""Reads a damage from scenario from a text file and add the information
to the damage class object.
@@ -242,21 +242,21 @@ def readDamageFromTextFile(self, path):
[path] : str
The input file name
- """
- if path == None:
- raise ValueError('None in path')
- file = open(path)
+ """ # noqa: D205, D401
+ if path == None: # noqa: E711
+ raise ValueError('None in path') # noqa: EM101, TRY003
+ file = open(path) # noqa: SIM115, PTH123
lines = file.readlines()
line_cnt = 0
for line in lines:
- line_cnt += 1
+ line_cnt += 1 # noqa: SIM113
sline = line.split()
line_length = len(sline)
if sline[0].lower() == 'leak':
# print(len(sline))
temp_leak = {}
- if line_length < 4:
+ if line_length < 4: # noqa: PLR2004
raise OSError(
'There must be at least 4 arguments in line' + repr(line_cnt)
)
@@ -264,7 +264,7 @@ def readDamageFromTextFile(self, path):
temp_leak['pipe_id'] = sline[1]
temp_leak['leak_loc'] = float(sline[2])
temp_leak['leak_type'] = int(sline[3])
- if line_length > 4:
+ if line_length > 4: # noqa: PLR2004
temp_leak['leak_time'] = float(sline[4])
else:
temp_leak['leak_time'] = self.default_time
@@ -275,13 +275,13 @@ def readDamageFromTextFile(self, path):
)
elif sline[0].lower() == 'break':
- if line_length < 3:
- raise OSError('Line cannot have more than three arguments')
+ if line_length < 3: # noqa: PLR2004
+ raise OSError('Line must have at least three arguments') # noqa: EM101, TRY003
# print('Probelm 2')
temp_break = {}
temp_break['pipe_id'] = sline[1]
temp_break['break_loc'] = float(sline[2])
- if line_length > 3:
+ if line_length > 3: # noqa: PLR2004
temp_break['break_time'] = float(sline[3])
else:
temp_break['break_time'] = self.default_time
@@ -295,13 +295,13 @@ def readDamageFromTextFile(self, path):
else:
logger.warning(sline)
logger.warning(
- 'No recogniziable command in damage file, line'
+ 'No recognizable command in damage file, line' # noqa: G003
+ repr(line_cnt)
+ '\n'
)
file.close()
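# readDamageFromTextFile() above parses whitespace-separated records; per the
# length checks, leak lines are 'leak pipe_id loc leak_type [time]' and break
# lines are 'break pipe_id loc [time]'. A hypothetical two-record damage file:
damage_text = 'leak P101 0.4 1 7200\nbreak P202 0.6\n'
for cnt, line in enumerate(damage_text.splitlines(), start=1):
    sline = line.split()
    if sline[0].lower() == 'leak' and len(sline) < 4:
        raise OSError('There must be at least 4 arguments in line ' + repr(cnt))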
- def applyNodalDamage(self, WaterNetwork, current_time):
+ def applyNodalDamage(self, WaterNetwork, current_time): # noqa: C901, N802, N803
"""Apply Nodal Damage
Parameters
@@ -313,20 +313,20 @@ def applyNodalDamage(self, WaterNetwork, current_time):
-------
None.
- """
+ """ # noqa: D400
if self.node_damage.empty:
- print('no node damage at all')
+ print('no node damage at all') # noqa: T201
return
curren_time_node_damage = self.node_damage[current_time]
- if type(curren_time_node_damage) == dict:
+ if type(curren_time_node_damage) == dict: # noqa: E721
curren_time_node_damage = pd.Series(
[curren_time_node_damage], index=[current_time]
)
- elif type(curren_time_node_damage) == pd.Series:
+ elif type(curren_time_node_damage) == pd.Series: # noqa: E721
if curren_time_node_damage.empty:
- print('No node damage at time ' + str(current_time))
+ print('No node damage at time ' + str(current_time)) # noqa: T201
return
else:
raise ValueError(
@@ -339,28 +339,28 @@ def applyNodalDamage(self, WaterNetwork, current_time):
# self._nodal_damage_method = self._registry.settings['damage_node_model']
method = self._registry.settings['damage_node_model']
if method == 'Predefined_demand':
- for ind, val in curren_time_node_damage.items():
+ for ind, val in curren_time_node_damage.items(): # noqa: B007, PERF102
node_name = val['node_name']
- pre_EQ_Demand = val['node_Pre_EQ_Demand']
- post_EQ_Demand = val['node_Post_EQ_Demand']
+ pre_EQ_Demand = val['node_Pre_EQ_Demand'] # noqa: N806
+ post_EQ_Demand = val['node_Post_EQ_Demand'] # noqa: N806
# if node_name not in WaterNetwork.node_name_list and icheck==True:
# raise ValueError('Node in damage list is not in water network model: '+repr(node_name))
# elif icheck==False:
# continue
node_cur_dem = (
- WaterNetwork.get_node(node_name)
+ WaterNetwork.get_node(node_name) # noqa: SLF001
.demand_timeseries_list._list[0]
.base_value
)
# print(str(pre_EQ_Demand) + ' ' + str(node_cur_dem))
# print(node_name)
- if abs(pre_EQ_Demand - node_cur_dem) > 0.001:
- raise
+ if abs(pre_EQ_Demand - node_cur_dem) > 0.001: # noqa: PLR2004
+ raise # noqa: PLE0704
ratio = post_EQ_Demand / pre_EQ_Demand
- WaterNetwork.get_node(node_name).demand_timeseries_list._list[
+ WaterNetwork.get_node(node_name).demand_timeseries_list._list[ # noqa: SLF001
0
].base_value = node_cur_dem * ratio
@@ -371,13 +371,13 @@ def applyNodalDamage(self, WaterNetwork, current_time):
self._registry.addNodalDamage(demand_damage)
elif (
- method == 'equal_diameter_emitter'
+ method == 'equal_diameter_emitter' # noqa: PLR1714
or method == 'equal_diameter_reservoir'
):
temp1 = []
temp2 = []
temp_new_explicit_leak_data = []
- for ind, val in curren_time_node_damage.items():
+ for ind, val in curren_time_node_damage.items(): # noqa: B007, PERF102
node_name = val['node_name']
number_of_damages = val['Number_of_damages']
pipe_length = val['node_Pipe_Length'] * 1000
@@ -415,7 +415,7 @@ def applyNodalDamage(self, WaterNetwork, current_time):
new_pipe_name_list = dict(zip(temp1, temp_new_explicit_leak_data))
self._registry.addNodalDamage(demand_damage, new_pipe_name_list)
elif method == 'SDD':
- for ind, val in curren_time_node_damage.items():
+ for ind, val in curren_time_node_damage.items(): # noqa: B007, PERF102
node_name = val['node_name']
number_of_damages = val['Number_of_damages']
pipe_length = val['node_Pipe_Length'] * 1000
@@ -426,7 +426,7 @@ def applyNodalDamage(self, WaterNetwork, current_time):
maximum_node_demand = 10
pipe_equal_length = pipe_length / 10
_hl = 8
- _C = 100
+ _C = 100 # noqa: N806
before_damage_pipe_length = pipe_equal_length / 2
over_designed_diameter = (
10.67 * (maximum_node_demand / _C) ** 1.852 * (pipe_length / _hl)
@@ -458,7 +458,7 @@ def applyNodalDamage(self, WaterNetwork, current_time):
)
# Node-to-middle-junction pipe definition
- OVD_pipe_name = 'lk_ODP_' + node_name
+ OVD_pipe_name = 'lk_ODP_' + node_name # noqa: N806
WaterNetwork.add_pipe(
OVD_pipe_name,
node_name,
@@ -484,11 +484,11 @@ def applyNodalDamage(self, WaterNetwork, current_time):
over_designed_diameter,
}
else:
- raise ValueError('Unknown nodal damage method')
+ raise ValueError('Unknown nodal damage method') # noqa: EM101, TRY003
# return WaterNetwork
- def getNd(self, mp, number_of_damages, sum_of_length):
+ def getNd(self, mp, number_of_damages, sum_of_length): # noqa: N802, D102
rr = number_of_damages / sum_of_length * 1000
node_damage_parametrs = self._registry.settings['node_damage_model']
@@ -520,9 +520,9 @@ def getNd(self, mp, number_of_damages, sum_of_length):
+ x
)
nd = 0.0036 * float(mp) + 0.9012 + (0.0248 * float(mp) - 0.877) * float(rr)
- return nd
+ return nd # noqa: RET504
- def getNd2(self, mp, number_of_damages, sum_of_length):
+ def getNd2(self, mp, number_of_damages, sum_of_length): # noqa: N802, D102
rr = number_of_damages / sum_of_length * 1000
node_damage_parametrs = self._registry.settings['node_damage_model']
@@ -553,9 +553,9 @@ def getNd2(self, mp, number_of_damages, sum_of_length):
+ x
)
- return nd
+ return nd # noqa: RET504
- def getEmitterCdAndElevation(
+ def getEmitterCdAndElevation( # noqa: N802, D102
self,
real_node_name,
wn,
@@ -564,28 +564,26 @@ def getEmitterCdAndElevation(
mp,
q,
):
- mp = (
- mp * 1.4223
- ) # this is because our CURRENT relationship is base on psi
- rr = number_of_damages / sum_of_length * 1000
+ mp = mp * 1.4223 # this is because our CURRENT relationship is based on psi
+ rr = number_of_damages / sum_of_length * 1000 # noqa: F841
nd = self.getNd(mp, number_of_damages, sum_of_length)
# equavalant_pipe_diameter = ( ((nd-1)*q)**2 /(0.125*9.81*3.14**2 * mp/1.4223) )**(1/4) * 1
if real_node_name == 'CC1381':
- print(nd)
+ print(nd) # noqa: T201
nd2 = self.getNd2(mp, number_of_damages, sum_of_length)
- print(nd2)
+ print(nd2) # noqa: T201
- node = wn.get_node(real_node_name)
+ node = wn.get_node(real_node_name) # noqa: F841
# new_elavation = node.elevation
nd = nd - 1
# nd0 = 0.0036*0 + 0.9012 + (0.0248*0-0.877)*rr
nd0 = self.getNd(0, number_of_damages, sum_of_length)
if real_node_name == 'CC1381':
- print(nd0)
+ print(nd0) # noqa: T201
nd02 = self.getNd2(0, number_of_damages, sum_of_length)
- print(nd02)
+ print(nd02) # noqa: T201
nd0 = nd0 - 1
alpha = (nd - nd0) / (mp)
mp0 = -1 * (nd0) / alpha
@@ -593,7 +591,7 @@ def getEmitterCdAndElevation(
cd = alpha * q
return cd, mp0
- def addExplicitLeakWithReservoir(
+ def addExplicitLeakWithReservoir( # noqa: N802, D102
self,
node_name,
number_of_damages,
@@ -602,7 +600,7 @@ def addExplicitLeakWithReservoir(
):
method = self._registry.settings['damage_node_model']
if (
- method == 'equal_diameter_emitter'
+ method == 'equal_diameter_emitter' # noqa: PLR1714
or method == 'equal_diameter_reservoir'
):
node = wn.get_node(node_name)
@@ -620,7 +618,7 @@ def addExplicitLeakWithReservoir(
new_node_name = 'lk_aux_' + node_name
new_pipe_name = 'lk_pipe_' + node_name
- new_C = 100000000000
+ new_C = 100000000000 # noqa: N806
equavalant_pipe_diameter = 1
q = node.demand_timeseries_list[0].base_value
@@ -634,7 +632,7 @@ def addExplicitLeakWithReservoir(
coordinates=new_coord,
)
nn = wn.get_node(new_node_name)
- nn._emitter_coefficient = cd
+ nn._emitter_coefficient = cd # noqa: SLF001
wn.options.hydraulic.emitter_exponent = 1
wn.add_pipe(
new_pipe_name,
@@ -683,29 +681,29 @@ def addExplicitLeakWithReservoir(
# new_coord_res = (node.coordinates[0]+10,node.coordinates[1]+20)
else:
- raise ValueError('Unkown Method')
+ raise ValueError('Unknown Method') # noqa: EM101, TRY003
return new_node_name, new_pipe_name, mp, q
- def estimateNodalDamage(self):
+ def estimateNodalDamage(self): # noqa: N802, D102
# res = pd.Series()
temp1 = []
temp2 = []
- for ind, val in self.node_damage.items():
+ for ind, val in self.node_damage.items(): # noqa: B007, PERF102
pipes_length = val['node_Pipe_Length']
- pipes_RR = val['node_RR']
+ pipes_RR = val['node_RR'] # noqa: N806
temp1.append(val['node_name'])
temp2.append(int(np.round(pipes_RR * pipes_length)))
res = pd.Series(data=temp2, index=temp1)
- return res
+ return res # noqa: RET504
- def getPipeDamageListAt(self, time):
+ def getPipeDamageListAt(self, time): # noqa: N802, D102
damaged_pipe_name_list = []
if self.pipe_all_damages.empty:
return damaged_pipe_name_list
current_time_pipe_damages = self.pipe_all_damages[time]
- if type(current_time_pipe_damages) == pd.core.series.Series:
+ if type(current_time_pipe_damages) == pd.core.series.Series: # noqa: E721
current_time_pipe_damages = current_time_pipe_damages.to_list()
else:
current_time_pipe_damages = [current_time_pipe_damages]
@@ -714,9 +712,9 @@ def getPipeDamageListAt(self, time):
cur_damage['pipe_id'] for cur_damage in current_time_pipe_damages
]
damaged_pipe_name_list = list(set(damaged_pipe_name_list))
- return damaged_pipe_name_list
+ return damaged_pipe_name_list # noqa: RET504
- def applyPipeDamages(self, WaterNetwork, current_time):
+ def applyPipeDamages(self, WaterNetwork, current_time): # noqa: C901, N802, N803
"""Apply the damage that we have in damage object. the damage is either
predicted or read from somewhere.
@@ -730,22 +728,22 @@ def applyPipeDamages(self, WaterNetwork, current_time):
current_time : int
current time
- """
+ """ # noqa: D205
last_pipe_id = None
same_pipe_damage_cnt = None
if self.pipe_all_damages.empty:
- print('No Pipe damages at all')
+ print('No Pipe damages at all') # noqa: T201
return
current_time_pipe_damages = self.pipe_all_damages[current_time]
- if type(current_time_pipe_damages) == dict:
+ if type(current_time_pipe_damages) == dict: # noqa: E721
current_time_pipe_damages = pd.Series(
[current_time_pipe_damages], index=[current_time]
)
- elif type(current_time_pipe_damages) == pd.Series:
+ elif type(current_time_pipe_damages) == pd.Series: # noqa: E721
if current_time_pipe_damages.empty:
- print('No Pipe damages at time ' + str(current_time))
+ print('No Pipe damages at time ' + str(current_time)) # noqa: T201
return
else:
raise ValueError(
@@ -827,7 +825,7 @@ def applyPipeDamages(self, WaterNetwork, current_time):
if 'sub_type' in cur_damage:
sub_type = cur_damage['sub_type']
- WaterNetwork = split_pipe(
+ WaterNetwork = split_pipe( # noqa: N806
WaterNetwork,
pipe_id,
new_pipe_id,
@@ -882,7 +880,7 @@ def applyPipeDamages(self, WaterNetwork, current_time):
damage_time = current_time / 3600
logger.debug(
- 'trying to break: ' + cur_damage['pipe_id'] + repr(damage_time)
+ 'trying to break: ' + cur_damage['pipe_id'] + repr(damage_time) # noqa: G003
)
# Naming new nodes and new pipe
new_node_id_for_old_pipe = (
@@ -894,7 +892,7 @@ def applyPipeDamages(self, WaterNetwork, current_time):
new_pipe_id = pipe_id + '_Break_' + repr(same_pipe_damage_cnt)
new_node_id = new_node_id_for_old_pipe
# breaking the node
- WaterNetwork = break_pipe(
+ WaterNetwork = break_pipe( # noqa: N806
WaterNetwork,
pipe_id,
new_pipe_id,
@@ -950,22 +948,22 @@ def applyPipeDamages(self, WaterNetwork, current_time):
)
# return WaterNetwork
- def applyTankDamages(self, WaterNetwork, current_time):
+ def applyTankDamages(self, WaterNetwork, current_time): # noqa: N802, N803, D102
if self.tank_damage.empty:
- print('No Tank Damage at all')
+ print('No Tank Damage at all') # noqa: T201
return
current_time_tank_damage = self.tank_damage[current_time]
- if type(current_time_tank_damage) != str:
+ if type(current_time_tank_damage) != str: # noqa: E721
if current_time_tank_damage.empty:
- print('No Tank Damage at time ' + str(current_time))
+ print('No Tank Damage at time ' + str(current_time)) # noqa: T201
return
else:
current_time_tank_damage = pd.Series(
[current_time_tank_damage], index=[current_time]
)
# print(current_time_tank_damage)
- for ind, value in current_time_tank_damage.items():
+ for ind, value in current_time_tank_damage.items(): # noqa: B007, PERF102
# if value not in WaterNetwork.tank_name_list:
# continue #contibue if there is not a tank with such damage
# connected_link_list = []
@@ -1005,24 +1003,24 @@ def applyTankDamages(self, WaterNetwork, current_time):
elif value == link.end_node.name:
link.end_node = new_node
else:
- raise
+ raise # noqa: PLE0704
- def applyPumpDamages(self, WaterNetwork, current_time):
+ def applyPumpDamages(self, WaterNetwork, current_time): # noqa: N802, N803, D102
# print(type(self.damaged_pumps))
if self.damaged_pumps.empty:
- print('No pump damage at all')
+ print('No pump damage at all') # noqa: T201
return
pump_damage_at_time = self.damaged_pumps[current_time]
- if type(pump_damage_at_time) != str:
+ if type(pump_damage_at_time) != str: # noqa: E721
if pump_damage_at_time.empty:
- print('No Pump Damage at time ' + str(current_time))
+ print('No Pump Damage at time ' + str(current_time)) # noqa: T201
return
else:
pump_damage_at_time = pd.Series(
[pump_damage_at_time], index=[current_time]
)
- for ind, values in pump_damage_at_time.items():
+ for ind, values in pump_damage_at_time.items(): # noqa: B007, PERF102
WaterNetwork.get_link(values).initial_status = LinkStatus(0)
def read_earthquake(self, earthquake_file_name):
@@ -1043,24 +1041,24 @@ def read_earthquake(self, earthquake_file_name):
-------
None.
- """
- if type(earthquake_file_name) != str:
- raise ValueError('string is wanted for earthqiake fie name')
+ """ # noqa: D205
+ if type(earthquake_file_name) != str: # noqa: E721
+ raise ValueError('a string is expected for the earthquake file name') # noqa: EM101, TRY003
- file = open(earthquake_file_name)
+ file = open(earthquake_file_name) # noqa: SIM115, PTH123
lines = file.readlines()
ct = 0
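+ # Expected format, one scenario per line (whitespace-separated):
+ # M, depth, X coordinate, Y coordinate, time.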
for line in lines:
- ct += 1
+ ct += 1 # noqa: SIM113
sline = line.split()
line_length = len(sline)
- if line_length != 5:
+ if line_length != 5: # noqa: PLR2004
raise OSError(
'there should be 5 values in line '
+ repr(ct)
+ '\n M[SPACE]depth[SPACE]X coordinate[SPACE]Y coordinate[SPACE]Time'
)
- temp_EQ = EarthquakeScenario(
+ temp_EQ = EarthquakeScenario( # noqa: N806
float(sline[0]),
float(sline[1]),
float(sline[2]),
@@ -1073,18 +1071,18 @@ def read_earthquake(self, earthquake_file_name):
file.close()
self.sortEarthquakeListTimely()
- def sortEarthquakeListTimely(self):
+ def sortEarthquakeListTimely(self): # noqa: N802
"""This functions sorts the list of earthquakes in a timely manner
Returns
-------
None.
- """
+ """ # noqa: D400, D401, D404
self._earthquake = self._earthquake.sort_index()  # sort_index returns a copy; assign it back
self.is_timely_sorted = True
- def predictDamage(self, wn, iClear=False):
+ def predictDamage(self, wn, iClear=False): # noqa: FBT002, N802, N803
"""This function predict the water network model damage based on probabilistic method.
Parameters
@@ -1099,29 +1097,29 @@ def predictDamage(self, wn, iClear=False):
-------
None.
- """
+ """ # noqa: D401, D404
if iClear:
self.pipe_leak = pd.Series()
self.pipe_break = pd.Series()
- for eq_in, eq in self._earthquake.items():
+ for eq_in, eq in self._earthquake.items(): # noqa: B007, PERF102
wntr_eq = eq.getWNTREarthquakeObject()
distance_to_pipes = wntr_eq.distance_to_epicenter(
wn, element_type=wntrfr.network.Pipe
)
pga = wntr_eq.pga_attenuation_model(distance_to_pipes)
pgv = wntr_eq.pgv_attenuation_model(distance_to_pipes)
- repair_rate = wntr_eq.repair_rate_model(pgv)
+ repair_rate = wntr_eq.repair_rate_model(pgv) # noqa: F841
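+ # Fragility curve with two lognormal damage states (sigma 0.5, medians
+ # 0.2 for leak and 0.5 for break, in the units returned by the PGA
+ # attenuation model); a state is then sampled for every pipe.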
fc = wntrfr.scenario.FragilityCurve()
fc.add_state('leak', 1, {'Default': lognorm(0.5, scale=0.2)})
fc.add_state('break', 2, {'Default': lognorm(0.5, scale=0.5)})
failure_probability = fc.cdf_probability(pga)
damage_state = fc.sample_damage_state(failure_probability)
- for pipe_ID, ds in damage_state.items():
+ for pipe_ID, ds in damage_state.items(): # noqa: N806
# if wn.get_link(pipe_ID).status==0:
# continue
- if ds == None:
+ if ds == None: # noqa: E711
continue
if ds.lower() == 'leak':
temp = {
@@ -1151,7 +1149,7 @@ def get_damage_distinct_time(self):
damage_time_list : list
Distinct times for all kinds of damage
- """
+ """ # noqa: D400
pipe_damage_unique_time = self.pipe_all_damages.index.unique().tolist()
node_damage_unique_time = self.node_damage.index.unique().tolist()
tank_damage_unique_time = self.tank_damage.index.unique().tolist()
@@ -1184,15 +1182,15 @@ def get_earthquake_distict_time(self):
pandas.Series()
a list of distinct earthquake times.
- """
+ """ # noqa: D205, D400, D401
reg = []
- if self.is_timely_sorted == False:
+ if self.is_timely_sorted == False: # noqa: E712
self.sortEarthquakeListTimely()
time_list = self._earthquake.index
last_value = None
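+ # The index is sorted above, so keeping only strictly increasing values
+ # yields each distinct earthquake time once.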
for time in iter(time_list):
- if last_value == None or last_value < time:
+ if last_value == None or last_value < time: # noqa: E711
reg.append(time)
last_value = time
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/__init__.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/__init__.py
index e69de29bb..b74acee6d 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/__init__.py
@@ -0,0 +1 @@
+# noqa: N999, D104
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/__init__.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/__init__.py
index e69de29bb..b74acee6d 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/__init__.py
@@ -0,0 +1 @@
+# noqa: N999, D104
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py
index 721ec7471..9dc65708e 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/io.py
@@ -146,10 +146,10 @@ def _is_number(s):
bool
Input is a number
- """
+ """ # noqa: D400, D401
try:
float(s)
- return True
+ return True # noqa: TRY300
except ValueError:
return False
@@ -168,7 +168,7 @@ def _str_time_to_sec(s):
int
Integer value of time in seconds.
- """
+ """ # noqa: D401
pattern1 = re.compile(r'^(\d+):(\d+):(\d+)$')
time_tuple = pattern1.search(s)
if bool(time_tuple):
@@ -177,7 +177,7 @@ def _str_time_to_sec(s):
+ int(time_tuple.groups()[1]) * 60
+ int(round(float(time_tuple.groups()[2])))
)
- else:
+ else: # noqa: RET505
pattern2 = re.compile(r'^(\d+):(\d+)$')
time_tuple = pattern2.search(s)
if bool(time_tuple):
@@ -185,16 +185,16 @@ def _str_time_to_sec(s):
int(time_tuple.groups()[0]) * 60 * 60
+ int(time_tuple.groups()[1]) * 60
)
- else:
+ else: # noqa: RET505
pattern3 = re.compile(r'^(\d+)$')
time_tuple = pattern3.search(s)
if bool(time_tuple):
return int(time_tuple.groups()[0]) * 60 * 60
- else:
- raise RuntimeError('Time format in ' 'INP file not recognized. ')
+ else: # noqa: RET505
+ raise RuntimeError('Time format in INP file not recognized.') # noqa: EM101, TRY003
-def _clock_time_to_sec(s, am_pm):
+def _clock_time_to_sec(s, am_pm): # noqa: C901
"""Converts EPANET clocktime format to seconds.
Parameters
@@ -211,13 +211,13 @@ def _clock_time_to_sec(s, am_pm):
int
Integer value of time in seconds
- """
+ """ # noqa: D401
if am_pm.upper() == 'AM':
am = True
elif am_pm.upper() == 'PM':
am = False
else:
- raise RuntimeError('am_pm option not recognized; options are AM or PM')
+ raise RuntimeError('am_pm option not recognized; options are AM or PM') # noqa: EM101, TRY003
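+ # Clock times follow a 12-hour convention: PM values get 12 hours added
+ # below, and AM/PM combined with a time past 12:00:00 is rejected.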
pattern1 = re.compile(r'^(\d+):(\d+):(\d+)$')
time_tuple = pattern1.search(s)
@@ -231,12 +231,12 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
+ else: # noqa: RET505
pattern2 = re.compile(r'^(\d+):(\d+)$')
time_tuple = pattern2.search(s)
if bool(time_tuple):
@@ -248,12 +248,12 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
+ else: # noqa: RET505
pattern3 = re.compile(r'^(\d+)$')
time_tuple = pattern3.search(s)
if bool(time_tuple):
@@ -262,13 +262,13 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
- raise RuntimeError('Time format in ' 'INP file not recognized. ')
+ else: # noqa: RET505
+ raise RuntimeError('Time format in INP file not recognized.') # noqa: EM101, TRY003
def _sec_to_string(sec):
@@ -295,7 +295,7 @@ def __init__(self):
self.top_comments = []
self.curves = OrderedDict()
- def read(self, inp_files, wn=None):
+ def read(self, inp_files, wn=None): # noqa: C901
"""Read an EPANET INP file and load data into a water network model object.
Both EPANET 2.0 and EPANET 2.2 INP file options are recognized and handled.
@@ -311,7 +311,7 @@ def read(self, inp_files, wn=None):
:class:`~wntrfr.network.model.WaterNetworkModel`
A water network model object
- """
+ """ # noqa: D205
if wn is None:
wn = WaterNetworkModel()
self.wn = wn
@@ -331,16 +331,16 @@ def read(self, inp_files, wn=None):
section = None
lnum = 0
edata = {'fname': filename}
- with open(filename, encoding='utf-8') as f:
+ with open(filename, encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
edata['lnum'] = lnum
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.startswith('['):
+ elif line.startswith('['): # noqa: RET507
vals = line.split(None, 1)
sec = vals[0].upper()
# Add handlers to deal with extra 'S'es (or missing 'S'es) in INP file
@@ -357,7 +357,7 @@ def read(self, inp_files, wn=None):
section = sec
# logger.info('%(fname)s:%(lnum)-6d %(sec)13s section found' % edata)
continue
- elif sec == '[END]':
+ elif sec == '[END]': # noqa: RET507
# logger.info('%(fname)s:%(lnum)-6d %(sec)13s end of file found' % edata)
section = None
break
@@ -456,14 +456,14 @@ def read(self, inp_files, wn=None):
self._read_tags()
# Set the _inpfile io data inside the water network, so it is saved somewhere
- wn._inpfile = self
+ wn._inpfile = self # noqa: SLF001
# Finish tags
self._read_end()
return self.wn
- def write(self, filename, wn, units=None, version=2.2, force_coordinates=False):
+ def write(self, filename, wn, units=None, version=2.2, force_coordinates=False): # noqa: FBT002
"""Write a water network model into an EPANET INP file.
.. note::
@@ -491,7 +491,7 @@ def write(self, filename, wn, units=None, version=2.2, force_coordinates=False):
"""
if not isinstance(wn, WaterNetworkModel):
- raise ValueError('Must pass a WaterNetworkModel object')
+ raise ValueError('Must pass a WaterNetworkModel object') # noqa: EM101, TRY003, TRY004
if units is not None and isinstance(units, str):
units = units.upper()
self.flow_units = FlowUnits[units]
@@ -508,7 +508,7 @@ def write(self, filename, wn, units=None, version=2.2, force_coordinates=False):
self.flow_units = FlowUnits.GPM
if self.mass_units is None:
self.mass_units = MassUnits.mg
- with open(filename, 'wb') as f:
+ with open(filename, 'wb') as f: # noqa: PTH123
self._write_title(f, wn)
self._write_junctions(f, wn)
self._write_reservoirs(f, wn)
@@ -548,8 +548,8 @@ def write(self, filename, wn, units=None, version=2.2, force_coordinates=False):
def _read_title(self):
lines = []
- for lnum, line in self.sections['[TITLE]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[TITLE]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -560,7 +560,7 @@ def _write_title(self, f, wn):
if wn.name is not None:
f.write(f'; Filename: {wn.name}\n'.encode(sys_default_enc))
f.write(
- f'; WNTR: {wntrfr.__version__}\n; Created: {datetime.datetime.now():%Y-%m-%d %H:%M:%S}\n'.encode(
+ f'; WNTR: {wntrfr.__version__}\n; Created: {datetime.datetime.now():%Y-%m-%d %H:%M:%S}\n'.encode( # noqa: DTZ005
sys_default_enc
)
)
@@ -572,19 +572,19 @@ def _write_title(self, f, wn):
def _read_junctions(self):
# try:
- for lnum, line in self.sections['[JUNCTIONS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[JUNCTIONS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
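+ # Demand pattern precedence: explicit fourth field, then the hydraulic
+ # options default pattern, then the model-wide default.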
- if len(current) > 3:
+ if len(current) > 3: # noqa: PLR2004
pat = current[3]
elif self.wn.options.hydraulic.pattern:
pat = self.wn.options.hydraulic.pattern
else:
pat = self.wn.patterns.default_pattern
base_demand = 0.0
- if len(current) > 2:
+ if len(current) > 2: # noqa: PLR2004
base_demand = to_si(
self.flow_units, float(current[2]), HydParam.Demand
)
@@ -613,7 +613,7 @@ def _write_junctions(self, f, wn):
junction = wn.nodes[junction_name]
# sina added this
- if junction._is_isolated == True:
+ if junction._is_isolated == True: # noqa: SLF001, E712
continue
if junction.demand_timeseries_list:
@@ -633,7 +633,7 @@ def _write_junctions(self, f, wn):
else:
base_demand = 0.0
demand_pattern = None
- E = {
+ E = { # noqa: N806
'name': junction_name,
'elev': from_si(
self.flow_units, junction.elevation, HydParam.Elevation
@@ -648,12 +648,12 @@ def _write_junctions(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_reservoirs(self):
- for lnum, line in self.sections['[RESERVOIRS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[RESERVOIRS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
- if len(current) == 2:
+ if len(current) == 2: # noqa: PLR2004
self.wn.add_reservoir(
current[0],
to_si(
@@ -678,10 +678,10 @@ def _write_reservoirs(self, f, wn):
reservoir = wn.nodes[reservoir_name]
# sina added this
- if reservoir._is_isolated == True:
+ if reservoir._is_isolated == True: # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': reservoir_name,
'head': from_si(
self.flow_units,
@@ -698,13 +698,13 @@ def _write_reservoirs(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_tanks(self):
- for lnum, line in self.sections['[TANKS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[TANKS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
volume = None
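+ # The field count selects the tank format: >= 8 fields supply a volume
+ # curve (an optional ninth field sets overflow), 7 give a fixed volume,
+ # and 6 form the minimal definition.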
- if len(current) >= 8: # Volume curve provided
+ if len(current) >= 8: # Volume curve provided # noqa: PLR2004
volume = float(current[6])
curve_name = current[7]
if curve_name == '*':
@@ -717,20 +717,20 @@ def _read_tanks(self):
curve_points.append((x, y))
self.wn.add_curve(curve_name, 'VOLUME', curve_points)
# curve = self.wn.get_curve(curve_name)
- if len(current) == 9:
+ if len(current) == 9: # noqa: PLR2004
overflow = current[8]
else:
overflow = False
- elif len(current) == 7:
+ elif len(current) == 7: # noqa: PLR2004
curve_name = None
overflow = False
volume = float(current[6])
- elif len(current) == 6:
+ elif len(current) == 6: # noqa: PLR2004
curve_name = None
overflow = False
volume = 0.0
else:
- raise RuntimeError('Tank entry format not recognized.')
+ raise RuntimeError('Tank entry format not recognized.') # noqa: EM101, TRY003
self.wn.add_tank(
current[0],
to_si(self.flow_units, float(current[1]), HydParam.Elevation),
@@ -745,7 +745,7 @@ def _read_tanks(self):
def _write_tanks(self, f, wn, version=2.2):
f.write('[TANKS]\n'.encode(sys_default_enc))
- if version != 2.2:
+ if version != 2.2: # noqa: PLR2004
f.write(
_TANK_LABEL.format(
';ID',
@@ -778,10 +778,10 @@ def _write_tanks(self, f, wn, version=2.2):
for tank_name in nnames:
tank = wn.nodes[tank_name]
- if tank._is_isolated == True: # sina added this
+ if tank._is_isolated == True: # sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': tank_name,
'elev': from_si(self.flow_units, tank.elevation, HydParam.Elevation),
'initlev': from_si(
@@ -803,7 +803,7 @@ def _write_tanks(self, f, wn, version=2.2):
}
if tank.vol_curve is not None:
E['curve'] = tank.vol_curve.name
- if version == 2.2:
+ if version == 2.2: # noqa: PLR2004
if tank.overflow:
E['overflow'] = 'YES'
if tank.vol_curve is None:
@@ -812,12 +812,12 @@ def _write_tanks(self, f, wn, version=2.2):
f.write('\n'.encode(sys_default_enc))
def _read_pipes(self):
- for lnum, line in self.sections['[PIPES]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[PIPES]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
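+ # Field layout: 8 fields add a minor loss and a status/CV flag, 7 add a
+ # minor loss only, and 6 describe a bare pipe with defaults.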
- if len(current) == 8:
+ if len(current) == 8: # noqa: PLR2004
minor_loss = float(current[6])
if current[7].upper() == 'CV':
link_status = LinkStatus.Open
@@ -825,11 +825,11 @@ def _read_pipes(self):
else:
link_status = LinkStatus[current[7].upper()]
check_valve = False
- elif len(current) == 7:
+ elif len(current) == 7: # noqa: PLR2004
minor_loss = float(current[6])
link_status = LinkStatus.Open
check_valve = False
- elif len(current) == 6:
+ elif len(current) == 6: # noqa: PLR2004
minor_loss = 0.0
link_status = LinkStatus.Open
check_valve = False
@@ -865,10 +865,10 @@ def _write_pipes(self, f, wn):
for pipe_name in lnames:
pipe = wn.links[pipe_name]
- if pipe._is_isolated == True: # Sina added this
+ if pipe._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': pipe_name,
'node1': pipe.start_node_name,
'node2': pipe.end_node_name,
@@ -886,7 +886,7 @@ def _write_pipes(self, f, wn):
f.write(_PIPE_ENTRY.format(**E).encode(sys_default_enc))
f.write('\n'.encode(sys_default_enc))
- def _read_pumps(self):
+ def _read_pumps(self): # noqa: C901
def create_curve(curve_name):
curve_points = []
if (
@@ -899,10 +899,10 @@ def create_curve(curve_name):
curve_points.append((x, y))
self.wn.add_curve(curve_name, 'HEAD', curve_points)
curve = self.wn.get_curve(curve_name)
- return curve
+ return curve # noqa: RET504
- for lnum, line in self.sections['[PUMPS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[PUMPS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -930,14 +930,14 @@ def create_curve(curve_name):
# assert pattern is None, 'In [PUMPS] entry, PATTERN may only be specified once.'
pattern = self.wn.get_pattern(current[i + 1]).name
else:
- raise RuntimeError('Pump keyword in inp file not recognized.')
+ raise RuntimeError('Pump keyword in inp file not recognized.') # noqa: EM101, TRY003
if speed is None:
speed = 1.0
if pump_type is None:
- raise RuntimeError(
- 'Either head curve id or pump power must be specified for all pumps.'
+ raise RuntimeError( # noqa: TRY003
+ 'Either head curve id or pump power must be specified for all pumps.' # noqa: EM101
)
self.wn.add_pump(
current[0], current[1], current[2], pump_type, value, speed, pattern
@@ -955,10 +955,10 @@ def _write_pumps(self, f, wn):
for pump_name in lnames:
pump = wn.links[pump_name]
- if pump._is_isolated == True: # Sina added this
+ if pump._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': pump_name,
'node1': pump.start_node_name,
'node2': pump.end_node_name,
@@ -975,7 +975,7 @@ def _write_pumps(self, f, wn):
from_si(self.flow_units, pump.power, HydParam.Power)
)
else:
- raise RuntimeError('Only head or power info is supported of pumps.')
+ raise RuntimeError('Only head or power info is supported for pumps.') # noqa: EM101, TRY003
tmp_entry = _PUMP_ENTRY
if pump.speed_timeseries.base_value != 1:
E['speed_keyword'] = 'SPEED'
@@ -995,16 +995,16 @@ def _write_pumps(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_valves(self):
- for lnum, line in self.sections['[VALVES]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[VALVES]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
- if len(current) == 6:
+ if len(current) == 6: # noqa: PLR2004
current.append(0.0)
- elif len(current) != 7:
- raise RuntimeError(
- 'The [VALVES] section of an INP file must have 6 or 7 entries.'
+ elif len(current) != 7: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ 'The [VALVES] section of an INP file must have 6 or 7 entries.' # noqa: EM101
)
valve_type = current[4].upper()
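+ # The valve setting is interpreted per type: a pressure for PRV/PSV/PBV,
+ # and the name of a HEADLOSS curve for curve-based valves.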
if valve_type in ['PRV', 'PSV', 'PBV']:
@@ -1025,7 +1025,7 @@ def _read_valves(self):
self.wn.add_curve(curve_name, 'HEADLOSS', curve_points)
valve_set = curve_name
else:
- raise RuntimeError('VALVE type "%s" unrecognized' % valve_type)
+ raise RuntimeError('VALVE type "%s" unrecognized' % valve_type) # noqa: UP031
self.wn.add_valve(
current[0],
current[1],
@@ -1048,10 +1048,10 @@ def _write_valves(self, f, wn):
for valve_name in lnames:
valve = wn.links[valve_name]
- if valve._is_isolated == True: # Sina added this
+ if valve._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': valve_name,
'node1': valve.start_node_name,
'node2': valve.end_node_name,
@@ -1083,10 +1083,10 @@ def _write_valves(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_emitters(self):
- for lnum, line in self.sections[
+ for lnum, line in self.sections[ # noqa: B007
'[EMITTERS]'
]: # Private attribute on junctions
- line = line.split(';')[0]
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1106,7 +1106,7 @@ def _write_emitters(self, f, wn):
junction = wn.nodes[junction_name]
# Sina added this
- if junction._is_isolated == True:
+ if junction._is_isolated == True: # noqa: SLF001, E712
continue
if junction.emitter_coefficient:
@@ -1123,13 +1123,13 @@ def _write_emitters(self, f, wn):
# System Operation
def _read_curves(self):
- for lnum, line in self.sections['[CURVES]']:
+ for lnum, line in self.sections['[CURVES]']: # noqa: B007
# It should be noted carefully that these lines are never directly
# applied to the WaterNetworkModel object. Because different curve
# types are treated differently, each of the curves are converted
# the first time they are used, and this is used to build up a
# dictionary for those conversions to take place.
- line = line.split(';')[0]
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1139,7 +1139,7 @@ def _read_curves(self):
self.curves[curve_name].append((float(current[1]), float(current[2])))
self.wn.curves[curve_name] = None
- def _write_curves(self, f, wn):
+ def _write_curves(self, f, wn): # noqa: C901
f.write('[CURVES]\n'.encode(sys_default_enc))
f.write(
_CURVE_LABEL.format(';ID', 'X-Value', 'Y-Value').encode(sys_default_enc)
@@ -1203,9 +1203,9 @@ def _write_curves(self, f, wn):
def _read_patterns(self):
_patterns = OrderedDict()
- for lnum, line in self.sections['[PATTERNS]']:
+ for lnum, line in self.sections['[PATTERNS]']: # noqa: B007
# read the lines for each pattern -- patterns can be multiple lines of arbitrary length
- line = line.split(';')[0]
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1220,10 +1220,10 @@ def _read_patterns(self):
for pattern_name, pattern in _patterns.items():
# add the patterns to the water network model
self.wn.add_pattern(pattern_name, pattern)
- if not self.wn.options.hydraulic.pattern and '1' in _patterns.keys():
+ if not self.wn.options.hydraulic.pattern and '1' in _patterns.keys(): # noqa: SIM118
# If there is a pattern called "1", then it is the default pattern if no other is supplied
self.wn.options.hydraulic.pattern = '1'
- elif self.wn.options.hydraulic.pattern not in _patterns.keys():
+ elif self.wn.options.hydraulic.pattern not in _patterns.keys(): # noqa: SIM118
# Sanity check - if the default pattern does not exist and it is not '1' then balk
# If default is '1' but it does not exist, then it is constant
# Any other default that does not exist is an error
@@ -1231,8 +1231,8 @@ def _read_patterns(self):
self.wn.options.hydraulic.pattern is not None
and self.wn.options.hydraulic.pattern != '1'
):
- raise KeyError(
- f'Default pattern {self.wn.options.hydraulic.pattern} is undefined'
+ raise KeyError( # noqa: TRY003
+ f'Default pattern {self.wn.options.hydraulic.pattern} is undefined' # noqa: EM102
)
self.wn.options.hydraulic.pattern = None
@@ -1252,13 +1252,13 @@ def _write_patterns(self, f, wn):
f.write(f'\n{pattern_name:s} {i:f}'.encode(sys_default_enc))
else:
f.write(f' {i:f}'.encode(sys_default_enc))
- count += 1
+ count += 1 # noqa: SIM113
f.write('\n'.encode(sys_default_enc))
f.write('\n'.encode(sys_default_enc))
- def _read_energy(self):
- for lnum, line in self.sections['[ENERGY]']:
- line = line.split(';')[0]
+ def _read_energy(self): # noqa: C901
+ for lnum, line in self.sections['[ENERGY]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1356,8 +1356,8 @@ def _write_energy(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_status(self):
- for lnum, line in self.sections['[STATUS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[STATUS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1370,7 +1370,7 @@ def _read_status(self):
):
new_status = LinkStatus[current[1].upper()]
link.initial_status = new_status
- link._user_status = new_status
+ link._user_status = new_status # noqa: SLF001
else:
if isinstance(link, wntrfr.network.Valve):
new_status = LinkStatus.Active
@@ -1392,7 +1392,7 @@ def _read_status(self):
setting = float(current[1])
# link.setting = setting
link.initial_setting = setting
- link._user_status = new_status
+ link._user_status = new_status # noqa: SLF001
link.initial_status = new_status
def _write_status(self, f, wn):
@@ -1403,7 +1403,7 @@ def _write_status(self, f, wn):
for pump_name in pnames:
pump = wn.links[pump_name]
- if pump._is_isolated == True: # Sina added this
+ if pump._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if pump.initial_status == LinkStatus.Closed:
@@ -1424,7 +1424,7 @@ def _write_status(self, f, wn):
for valve_name in vnames:
valve = wn.links[valve_name]
- if valve._is_isolated == True: # Sina added this
+ if valve._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
# valve_type = valve.valve_type
@@ -1452,7 +1452,7 @@ def _write_status(self, f, wn):
def _read_controls(self):
control_count = 0
- for lnum, line in self.sections['[CONTROLS]']:
+ for lnum, line in self.sections['[CONTROLS]']: # noqa: B007
control_count += 1
control_name = 'control ' + str(control_count)
@@ -1472,21 +1472,21 @@ def _read_controls(self):
else:
self.wn.add_control(control_name, control_obj)
- def _write_controls(self, f, wn):
+ def _write_controls(self, f, wn): # noqa: C901
def get_setting(control_action, control_name):
- value = control_action._value
- attribute = control_action._attribute.lower()
+ value = control_action._value # noqa: SLF001
+ attribute = control_action._attribute.lower() # noqa: SLF001
if attribute == 'status':
setting = LinkStatus(value).name
elif attribute == 'base_speed':
setting = str(value)
elif attribute == 'setting' and isinstance(
- control_action._target_obj,
+ control_action._target_obj, # noqa: SLF001
Valve,
):
- valve = control_action._target_obj
+ valve = control_action._target_obj # noqa: SLF001
valve_type = valve.valve_type
- if valve_type == 'PRV' or valve_type == 'PSV' or valve_type == 'PBV':
+ if valve_type == 'PRV' or valve_type == 'PSV' or valve_type == 'PBV': # noqa: PLR1714
setting = str(from_si(self.flow_units, value, HydParam.Pressure))
elif valve_type == 'FCV':
setting = str(from_si(self.flow_units, value, HydParam.Flow))
@@ -1501,7 +1501,7 @@ def get_setting(control_action, control_name):
else:
setting = None
logger.warning(
- 'Could not write control ' + str(control_name) + ' - skipping'
+ 'Could not write control ' + str(control_name) + ' - skipping' # noqa: G003
)
return setting
@@ -1509,80 +1509,80 @@ def get_setting(control_action, control_name):
f.write('[CONTROLS]\n'.encode(sys_default_enc))
# Time controls and conditional controls only
for text, all_control in wn.controls():
- control_action = all_control._then_actions[0]
+ control_action = all_control._then_actions[0] # noqa: SLF001
# Sina added this
- if control_action._target_obj._is_isolated == True:
+ if control_action._target_obj._is_isolated == True: # noqa: SLF001, E712
continue
if all_control.epanet_control_type is not _ControlType.rule:
if (
- len(all_control._then_actions) != 1
- or len(all_control._else_actions) != 0
+ len(all_control._then_actions) != 1 # noqa: SLF001
+ or len(all_control._else_actions) != 0 # noqa: SLF001
):
- logger.error('Too many actions on CONTROL "%s"' % text)
- raise RuntimeError('Too many actions on CONTROL "%s"' % text)
+ logger.error('Too many actions on CONTROL "%s"' % text) # noqa: G002, UP031
+ raise RuntimeError('Too many actions on CONTROL "%s"' % text) # noqa: UP031
if not isinstance(control_action.target()[0], Link):
continue
if isinstance(
- all_control._condition,
+ all_control._condition, # noqa: SLF001
(SimTimeCondition, TimeOfDayCondition),
):
entry = '{ltype} {link} {setting} AT {compare} {time:g}\n'
vals = {
- 'ltype': control_action._target_obj.link_type,
- 'link': control_action._target_obj.name,
+ 'ltype': control_action._target_obj.link_type, # noqa: SLF001
+ 'link': control_action._target_obj.name, # noqa: SLF001
'setting': get_setting(control_action, text),
'compare': 'TIME',
- 'time': all_control._condition._threshold / 3600.0,
+ 'time': all_control._condition._threshold / 3600.0, # noqa: SLF001
}
if vals['setting'] is None:
continue
- if isinstance(all_control._condition, TimeOfDayCondition):
+ if isinstance(all_control._condition, TimeOfDayCondition): # noqa: SLF001
vals['compare'] = 'CLOCKTIME'
f.write(entry.format(**vals).encode(sys_default_enc))
elif (
- all_control._condition._source_obj._is_isolated == True
+ all_control._condition._source_obj._is_isolated == True # noqa: SLF001, E712
): # Sina added this
continue
- elif isinstance(all_control._condition, (ValueCondition)):
+ elif isinstance(all_control._condition, (ValueCondition)): # noqa: SLF001
entry = '{ltype} {link} {setting} IF {ntype} {node} {compare} {thresh}\n'
vals = {
- 'ltype': control_action._target_obj.link_type,
- 'link': control_action._target_obj.name,
+ 'ltype': control_action._target_obj.link_type, # noqa: SLF001
+ 'link': control_action._target_obj.name, # noqa: SLF001
'setting': get_setting(control_action, text),
- 'ntype': all_control._condition._source_obj.node_type,
- 'node': all_control._condition._source_obj.name,
+ 'ntype': all_control._condition._source_obj.node_type, # noqa: SLF001
+ 'node': all_control._condition._source_obj.name, # noqa: SLF001
'compare': 'above',
'thresh': 0.0,
}
if vals['setting'] is None:
continue
- if all_control._condition._relation in [
+ if all_control._condition._relation in [ # noqa: SLF001
np.less,
np.less_equal,
Comparison.le,
Comparison.lt,
]:
vals['compare'] = 'below'
- threshold = all_control._condition._threshold
- if isinstance(all_control._condition._source_obj, Tank):
+ threshold = all_control._condition._threshold # noqa: SLF001
+ if isinstance(all_control._condition._source_obj, Tank): # noqa: SLF001
vals['thresh'] = from_si(
self.flow_units, threshold, HydParam.HydraulicHead
)
- elif isinstance(all_control._condition._source_obj, Junction):
+ elif isinstance(all_control._condition._source_obj, Junction): # noqa: SLF001
vals['thresh'] = from_si(
self.flow_units, threshold, HydParam.Pressure
)
else:
- raise RuntimeError(
- 'Unknown control for EPANET INP files: %s'
+ raise RuntimeError( # noqa: TRY004
+ 'Unknown control for EPANET INP files: %s' # noqa: UP031
% type(all_control)
)
f.write(entry.format(**vals).encode(sys_default_enc))
elif not isinstance(all_control, Control):
raise RuntimeError(
- 'Unknown control for EPANET INP files: %s'
+ 'Unknown control for EPANET INP files: %s' # noqa: UP031
% type(all_control)
)
f.write('\n'.encode(sys_default_enc))
@@ -1605,21 +1605,21 @@ def _write_rules(self, f, wn):
if all_control.epanet_control_type == _ControlType.rule:
# Sina added this begin
try:
- if all_control._then_actions[0]._target_obj._is_isolated == True:
+ if all_control._then_actions[0]._target_obj._is_isolated == True: # noqa: SLF001, E712
continue
- except:
+ except: # noqa: S110, E722
pass
try:
- if all_control.condition._source_obj._is_isolated == True:
+ if all_control.condition._source_obj._is_isolated == True: # noqa: SLF001, E712
continue
- except:
+ except: # noqa: S110, E722
pass
# Sina added this end
if all_control.name == '':
- all_control._name = text
+ all_control._name = text # noqa: SLF001
rule = _EpanetRule('blah', self.flow_units, self.mass_units)
rule.from_if_then_else(all_control)
f.write(entry.format(str(rule)).encode(sys_default_enc))
@@ -1628,7 +1628,7 @@ def _write_rules(self, f, wn):
def _read_demands(self):
demand_num = 0
has_been_read = set()
- for lnum, line in self.sections['[DEMANDS]']:
+ for lnum, line in self.sections['[DEMANDS]']: # noqa: B007
ldata = line.split(';')
if len(ldata) > 1 and (ldata[1] != ''):
category = ldata[1]
@@ -1639,7 +1639,7 @@ def _read_demands(self):
continue
demand_num = demand_num + 1
node = self.wn.get_node(current[0])
- if len(current) == 2:
+ if len(current) == 2: # noqa: PLR2004
pattern = None
else:
pattern = self.wn.get_pattern(current[2])
@@ -1666,11 +1666,11 @@ def _write_demands(self, f, wn):
# nodes.sort()
for node in nodes:
# Sina added this
- if wn.get_node(node)._is_isolated == True:
+ if wn.get_node(node)._is_isolated == True: # noqa: SLF001, E712
continue
demands = wn.get_node(node).demand_timeseries_list
if len(demands) > 1:
- for ct, demand in enumerate(demands):
+ for ct, demand in enumerate(demands): # noqa: B007
cat = str(demand.category)
# if cat == 'EN2 base':
# cat = ''
@@ -1678,7 +1678,7 @@ def _write_demands(self, f, wn):
cat = ''
else:
cat = ' ;' + demand.category
- E = {
+ E = { # noqa: N806
'node': node,
'base': from_si(
self.flow_units, demand.base_value, HydParam.Demand
@@ -1698,8 +1698,8 @@ def _write_demands(self, f, wn):
# Water Quality
def _read_quality(self):
- for lnum, line in self.sections['[QUALITY]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[QUALITY]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1722,12 +1722,12 @@ def _read_quality(self):
def _write_quality(self, f, wn):
f.write('[QUALITY]\n'.encode(sys_default_enc))
entry = '{:10s} {:10s}\n'
- label = '{:10s} {:10s}\n'
+ label = '{:10s} {:10s}\n' # noqa: F841
nnodes = list(wn.nodes.keys())
# nnodes.sort()
for node_name in nnodes:
node = wn.nodes[node_name]
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if node.initial_quality:
if wn.options.quality.parameter == 'CHEMICAL':
@@ -1748,13 +1748,13 @@ def _write_quality(self, f, wn):
)
f.write('\n'.encode(sys_default_enc))
- def _read_reactions(self):
- BulkReactionCoeff = QualParam.BulkReactionCoeff
- WallReactionCoeff = QualParam.WallReactionCoeff
+ def _read_reactions(self): # noqa: C901
+ BulkReactionCoeff = QualParam.BulkReactionCoeff # noqa: N806
+ WallReactionCoeff = QualParam.WallReactionCoeff # noqa: N806
if self.mass_units is None:
self.mass_units = MassUnits.mg
- for lnum, line in self.sections['[REACTIONS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[REACTIONS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1819,7 +1819,7 @@ def _read_reactions(self):
elif key1 == 'ROUGHNESS':
self.wn.options.reaction.roughness_correl = float(current[2])
else:
- raise RuntimeError('Reaction option not recognized: %s' % key1)
+ raise RuntimeError('Reaction option not recognized: %s' % key1) # noqa: UP031
def _write_reactions(self, f, wn):
f.write('[REACTIONS]\n'.encode(sys_default_enc))
@@ -1831,7 +1831,7 @@ def _write_reactions(self, f, wn):
entry_int = ' {:s} {:s} {:d}\n'
entry_float = ' {:s} {:s} {:<10.4f}\n'
for tank_name, tank in wn.nodes(Tank):
- if tank._is_isolated == True: # Sina added this
+ if tank._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if tank.bulk_coeff is not None:
f.write(
@@ -1848,7 +1848,7 @@ def _write_reactions(self, f, wn):
).encode(sys_default_enc)
)
for pipe_name, pipe in wn.links(Pipe):
- if pipe._is_isolated == True: # Sina added this
+ if pipe._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if pipe.bulk_coeff is not None:
f.write(
@@ -1937,8 +1937,8 @@ def _write_reactions(self, f, wn):
def _read_sources(self):
source_num = 0
- for lnum, line in self.sections['[SOURCES]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[SOURCES]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -1958,7 +1958,7 @@ def _read_sources(self):
QualParam.Concentration,
self.mass_units,
)
- if len(current) == 3:
+ if len(current) == 3: # noqa: PLR2004
self.wn.add_source(
'INP' + str(source_num), current[0], current[1], strength, None
)
@@ -1980,12 +1980,12 @@ def _write_sources(self, f, wn):
sys_default_enc
)
)
- nsources = list(wn._sources.keys())
+ nsources = list(wn._sources.keys()) # noqa: SLF001
# nsources.sort()
for source_name in nsources:
- source = wn._sources[source_name]
+ source = wn._sources[source_name] # noqa: SLF001
- if source._is_isolated == True: # Sina added this
+ if source._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if source.source_type.upper() == 'MASS':
@@ -2003,7 +2003,7 @@ def _write_sources(self, f, wn):
self.mass_units,
)
- E = {
+ E = { # noqa: N806
'node': source.node_name,
'type': source.source_type,
'quality': str(strength),
@@ -2019,8 +2019,8 @@ def _write_sources(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_mixing(self):
- for lnum, line in self.sections['[MIXING]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[MIXING]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -2029,12 +2029,12 @@ def _read_mixing(self):
tank = self.wn.get_node(tank_name)
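+ # Recognized mixing models: MIXED, 2COMP (requires a fraction), FIFO,
+ # and LIFO.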
if key == 'MIXED':
tank.mixing_model = MixType.Mix1
- elif key == '2COMP' and len(current) > 2:
+ elif key == '2COMP' and len(current) > 2: # noqa: PLR2004
tank.mixing_model = MixType.Mix2
tank.mixing_fraction = float(current[2])
- elif key == '2COMP' and len(current) < 3:
+ elif key == '2COMP' and len(current) < 3: # noqa: PLR2004
raise RuntimeError(
- 'Mixing model 2COMP requires fraction on tank %s' % tank_name
+ 'Mixing model 2COMP requires a fraction on tank %s' % tank_name # noqa: UP031
)
elif key == 'FIFO':
tank.mixing_model = MixType.FIFO
@@ -2052,13 +2052,13 @@ def _write_mixing(self, f, wn):
# lnames.sort()
for tank_name in lnames:
tank = wn.nodes[tank_name]
- if tank._mixing_model is not None:
+ if tank._mixing_model is not None: # noqa: SLF001
# Sina added this
- if tank._is_isolated == True:
+ if tank._is_isolated == True: # noqa: SLF001, E712
continue
- if tank._mixing_model in [MixType.Mixed, MixType.Mix1, 0]:
+ if tank._mixing_model in [MixType.Mixed, MixType.Mix1, 0]: # noqa: SLF001
f.write(f' {tank_name:19s} MIXED\n'.encode(sys_default_enc))
- elif tank._mixing_model in [
+ elif tank._mixing_model in [ # noqa: SLF001
MixType.TwoComp,
MixType.Mix2,
'2comp',
@@ -2070,32 +2070,32 @@ def _write_mixing(self, f, wn):
sys_default_enc
)
)
- elif tank._mixing_model in [MixType.FIFO, 2]:
+ elif tank._mixing_model in [MixType.FIFO, 2]: # noqa: SLF001
f.write(f' {tank_name:19s} FIFO\n'.encode(sys_default_enc))
- elif tank._mixing_model in [MixType.LIFO, 3]:
+ elif tank._mixing_model in [MixType.LIFO, 3]: # noqa: SLF001
f.write(f' {tank_name:19s} LIFO\n'.encode(sys_default_enc))
elif (
- isinstance(tank._mixing_model, str)
+ isinstance(tank._mixing_model, str) # noqa: SLF001
and tank.mixing_fraction is not None
):
f.write(
- f' {tank_name:19s} {tank._mixing_model} {tank.mixing_fraction}\n'.encode(
+ f' {tank_name:19s} {tank._mixing_model} {tank.mixing_fraction}\n'.encode( # noqa: SLF001
sys_default_enc
)
)
- elif isinstance(tank._mixing_model, str):
+ elif isinstance(tank._mixing_model, str): # noqa: SLF001
f.write(
- f' {tank_name:19s} {tank._mixing_model}\n'.encode(
+ f' {tank_name:19s} {tank._mixing_model}\n'.encode( # noqa: SLF001
sys_default_enc
)
)
else:
- logger.warning('Unknown mixing model: %s', tank._mixing_model)
+ logger.warning('Unknown mixing model: %s', tank._mixing_model) # noqa: SLF001
f.write('\n'.encode(sys_default_enc))
# Options and Reporting
- def _read_options(self):
+ def _read_options(self): # noqa: C901, PLR0912
edata = OrderedDict()
wn = self.wn
opts = wn.options
@@ -2104,7 +2104,7 @@ def _read_options(self):
edata['sec'] = '[OPTIONS]'
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) < 2:
+ if len(words) < 2: # noqa: PLR2004
edata['key'] = words[0]
raise RuntimeError(
'%(lnum)-6d %(sec)13s no value provided for %(key)s' % edata
@@ -2128,7 +2128,7 @@ def _read_options(self):
else:
opts.quality.parameter = 'CHEMICAL'
opts.quality.chemical_name = words[1]
- if len(words) > 2:
+ if len(words) > 2: # noqa: PLR2004
if 'mg' in words[2].lower():
self.mass_units = MassUnits.mg
opts.quality.inpfile_units = words[2]
@@ -2136,8 +2136,8 @@ def _read_options(self):
self.mass_units = MassUnits.ug
opts.quality.inpfile_units = words[2]
else:
- raise ValueError(
- 'Invalid chemical units in OPTIONS section'
+ raise ValueError( # noqa: TRY003
+ 'Invalid chemical units in OPTIONS section' # noqa: EM101
)
else:
self.mass_units = MassUnits.mg
@@ -2158,7 +2158,7 @@ def _read_options(self):
opts.hydraulic.flowchange = float(words[1])
elif key == 'UNBALANCED':
opts.hydraulic.unbalanced = words[1].upper()
- if len(words) > 2:
+ if len(words) > 2: # noqa: PLR2004
opts.hydraulic.unbalanced_value = int(words[2])
elif key == 'MINIMUM':
minimum_pressure = to_si(
@@ -2171,7 +2171,7 @@ def _read_options(self):
)
opts.hydraulic.required_pressure = required_pressure
elif key == 'PRESSURE':
- if len(words) > 2:
+ if len(words) > 2: # noqa: PLR2004
if words[1].upper() == 'EXPONENT':
opts.hydraulic.pressure_exponent = float(words[2])
else:
@@ -2184,7 +2184,7 @@ def _read_options(self):
elif key == 'PATTERN':
opts.hydraulic.pattern = words[1]
elif key == 'DEMAND':
- if len(words) > 2:
+ if len(words) > 2: # noqa: PLR2004
if words[1].upper() == 'MULTIPLIER':
opts.hydraulic.demand_multiplier = float(words[2])
elif words[1].upper() == 'MODEL':
@@ -2201,7 +2201,7 @@ def _read_options(self):
% edata
)
elif key == 'EMITTER':
- if len(words) > 2:
+ if len(words) > 2: # noqa: PLR2004
opts.hydraulic.emitter_exponent = float(words[2])
else:
edata['key'] = 'EMITTER EXPONENT'
@@ -2219,14 +2219,14 @@ def _read_options(self):
opts.hydraulic.damplimit = float(words[1])
elif key == 'MAP':
opts.graphics.map_filename = words[1]
- elif len(words) == 2:
+ elif len(words) == 2: # noqa: PLR2004
edata['key'] = words[0]
setattr(opts, words[0].lower(), float(words[1]))
logger.warning(
'%(lnum)-6d %(sec)13s option "%(key)s" is undocumented; adding, but please verify syntax',
edata,
)
- elif len(words) == 3:
+ elif len(words) == 3: # noqa: PLR2004
edata['key'] = words[0] + ' ' + words[1]
setattr(
opts,
@@ -2239,15 +2239,15 @@ def _read_options(self):
)
if isinstance(opts.time.report_timestep, (float, int)):
if opts.time.report_timestep < opts.time.hydraulic_timestep:
- raise RuntimeError(
- 'opts.report_timestep must be greater than or equal to opts.hydraulic_timestep.'
+ raise RuntimeError( # noqa: TRY003
+ 'opts.report_timestep must be greater than or equal to opts.hydraulic_timestep.' # noqa: EM101
)
if opts.time.report_timestep % opts.time.hydraulic_timestep != 0:
- raise RuntimeError(
- 'opts.report_timestep must be a multiple of opts.hydraulic_timestep'
+ raise RuntimeError( # noqa: TRY003
+ 'opts.report_timestep must be a multiple of opts.hydraulic_timestep' # noqa: EM101
)
- def _write_options(self, f, wn, version=2.2):
+ def _write_options(self, f, wn, version=2.2): # noqa: C901
f.write('[OPTIONS]\n'.encode(sys_default_enc))
entry_string = '{:20s} {:20s}\n'
entry_float = '{:20s} {:.11g}\n'
@@ -2300,7 +2300,7 @@ def _write_options(self, f, wn, version=2.2):
)
# EPANET 2.2 OPTIONS
- if version == 2.0:
+ if version == 2.0: # noqa: PLR2004
pass
else:
if wn.options.hydraulic.headerror != 0:
@@ -2354,7 +2354,7 @@ def _write_options(self, f, wn, version=2.2):
)
# EPANET 2.2 OPTIONS
- if version == 2.0:
+ if version == 2.0: # noqa: PLR2004
if wn.options.hydraulic.demand_model in ['PDA', 'PDD']:
logger.critical(
'You have specified a PDD analysis using EPANET 2.0. This is not supported in EPANET 2.0. The analysis will default to DD mode.'
@@ -2383,7 +2383,7 @@ def _write_options(self, f, wn, version=2.2):
HydParam.Pressure,
)
if (
- required_pressure >= 0.1
+ required_pressure >= 0.1 # noqa: PLR2004
): # EPANET lower limit on required pressure = 0.1 (in psi or m)
f.write(
'{:20s} {:.2f}\n'.format(
@@ -2391,7 +2391,7 @@ def _write_options(self, f, wn, version=2.2):
).encode(sys_default_enc)
)
else:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REQUIRED PRESSURE is below the lower limit for EPANET (0.1 in psi or m). The value has been set to 0.1 in the INP file.'
)
logger.warning(
@@ -2477,8 +2477,8 @@ def _write_options(self, f, wn, version=2.2):
def _read_times(self):
opts = self.wn.options
time_format = ['am', 'AM', 'pm', 'PM']
- for lnum, line in self.sections['[TIMES]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[TIMES]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -2501,7 +2501,7 @@ def _read_times(self):
else int(_str_time_to_sec(current[2]))
)
elif current[1].upper() == 'CLOCKTIME':
- if len(current) > 3:
+ if len(current) > 3: # noqa: PLR2004
time_format = current[3].upper()
else:
# Kludge for 24hr time that needs an AM/PM
@@ -2575,7 +2575,7 @@ def _write_times(self, f, wn):
day = int(hrs / 24)
hrs -= day * 24
- if hrs < 12:
+ if hrs < 12: # noqa: PLR2004
time_format = ' AM'
else:
hrs -= 12
@@ -2600,9 +2600,9 @@ def _write_times(self, f, wn):
)
f.write('\n'.encode(sys_default_enc))
- def _read_report(self):
- for lnum, line in self.sections['[REPORT]']:
- line = line.split(';')[0]
+ def _read_report(self): # noqa: C901
+ for lnum, line in self.sections['[REPORT]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -2645,7 +2645,7 @@ def _read_report(self):
i = ct + 2
self.wn.options.report.links.append(current[i])
elif (
- current[0].lower() not in self.wn.options.report.report_params.keys()
+ current[0].lower() not in self.wn.options.report.report_params.keys() # noqa: SIM118
):
logger.warning('Unknown report parameter: %s', current[0])
continue
@@ -2658,7 +2658,7 @@ def _read_report(self):
current[1].upper()
] = float(current[2])
- def _write_report(self, f, wn):
+ def _write_report(self, f, wn): # noqa: C901
f.write('[REPORT]\n'.encode(sys_default_enc))
report = wn.options.report
if report.status.upper() != 'NO':
@@ -2697,7 +2697,7 @@ def _write_report(self, f, wn):
else:
f.write(f' {link}'.encode(sys_default_enc))
f.write('\n'.encode(sys_default_enc))
- # FIXME: defaults no longer located here
+ # FIXME: defaults no longer located here # noqa: FIX001, TD001, TD002
# for key, item in report.report_params.items():
# if item[1] != item[0]:
# f.write('{:10s} {}\n'.format(key.upper(), item[1]).encode(sys_default_enc))
@@ -2713,8 +2713,8 @@ def _write_report(self, f, wn):
# Network Map/Tags
def _read_coordinates(self):
- for lnum, line in self.sections['[COORDINATES]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[COORDINATES]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -2728,24 +2728,24 @@ def _write_coordinates(self, f, wn):
label = '{:10s} {:10s} {:10s}\n'
f.write(label.format(';Node', 'X-Coord', 'Y-Coord').encode(sys_default_enc))
for name, node in wn.nodes():
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
val = node.coordinates
f.write(entry.format(name, val[0], val[1]).encode(sys_default_enc))
f.write('\n'.encode(sys_default_enc))
def _read_vertices(self):
- for lnum, line in self.sections['[VERTICES]']:
- line = line.split(';')[0].strip()
+ for lnum, line in self.sections['[VERTICES]']: # noqa: B007
+ line = line.split(';')[0].strip() # noqa: PLW2901
current = line.split()
if current == []:
continue
- if len(current) != 3:
+ if len(current) != 3: # noqa: PLR2004
logger.warning('Invalid VERTICES line: %s', line)
continue
link_name = current[0]
link = self.wn.get_link(link_name)
- link._vertices.append((float(current[1]), float(current[2])))
+ link._vertices.append((float(current[1]), float(current[2]))) # noqa: SLF001
def _write_vertices(self, f, wn):
f.write('[VERTICES]\n'.encode(sys_default_enc))
@@ -2753,10 +2753,10 @@ def _write_vertices(self, f, wn):
label = '{:10s} {:10s} {:10s}\n'
f.write(label.format(';Link', 'X-Coord', 'Y-Coord').encode(sys_default_enc))
for name, link in wn.links():
- if link._is_isolated == True: # Sina added this
+ if link._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
for vert in (
- link._vertices
+ link._vertices # noqa: SLF001
): # Sina: I unindented this and the next line. Possible Bug in WNTR-1
f.write(entry.format(name, vert[0], vert[1]).encode(sys_default_enc))
@@ -2764,29 +2764,29 @@ def _write_vertices(self, f, wn):
def _read_labels(self):
labels = []
- for lnum, line in self.sections['[LABELS]']:
- line = line.split(';')[0].strip()
+ for lnum, line in self.sections['[LABELS]']: # noqa: B007
+ line = line.split(';')[0].strip() # noqa: PLW2901
current = line.split()
if current == []:
continue
labels.append(line)
- self.wn._labels = labels
+ self.wn._labels = labels # noqa: SLF001
def _write_labels(self, f, wn):
f.write('[LABELS]\n'.encode(sys_default_enc))
- if wn._labels is not None:
- for label in wn._labels:
+ if wn._labels is not None: # noqa: SLF001
+ for label in wn._labels: # noqa: SLF001
f.write(f' {label}\n'.encode(sys_default_enc))
f.write('\n'.encode(sys_default_enc))
def _read_backdrop(self):
- for lnum, line in self.sections['[BACKDROP]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[BACKDROP]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
key = current[0].upper()
- if key == 'DIMENSIONS' and len(current) > 4:
+ if key == 'DIMENSIONS' and len(current) > 4: # noqa: PLR2004
self.wn.options.graphics.dimensions = [
current[1],
current[2],
@@ -2797,7 +2797,7 @@ def _read_backdrop(self):
self.wn.options.graphics.units = current[1]
elif key == 'FILE' and len(current) > 1:
self.wn.options.graphics.image_filename = current[1]
- elif key == 'OFFSET' and len(current) > 2:
+ elif key == 'OFFSET' and len(current) > 2: # noqa: PLR2004
self.wn.options.graphics.offset = [current[1], current[2]]
def _write_backdrop(self, f, wn):
@@ -2828,8 +2828,8 @@ def _write_backdrop(self, f, wn):
f.write('\n'.encode(sys_default_enc))
def _read_tags(self):
- for lnum, line in self.sections['[TAGS]']:
- line = line.split(';')[0]
+ for lnum, line in self.sections['[TAGS]']: # noqa: B007
+ line = line.split(';')[0] # noqa: PLW2901
current = line.split()
if current == []:
continue
@@ -2851,7 +2851,7 @@ def _write_tags(self, f, wn):
# nnodes.sort()
for node_name in nnodes:
node = wn.nodes[node_name]
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if node.tag:
f.write(
@@ -2862,7 +2862,7 @@ def _write_tags(self, f, wn):
for link_name in nlinks:
link = wn.links[link_name]
- if link._is_isolated == True: # Sina added this
+ if link._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if link.tag:
f.write(
@@ -2873,7 +2873,7 @@ def _write_tags(self, f, wn):
# End of File
def _read_end(self):
- """Finalize read by verifying that all curves have been dealt with"""
+ """Finalize read by verifying that all curves have been dealt with""" # noqa: D400
def create_curve(curve_name):
curve_points = []
@@ -2887,12 +2887,12 @@ def create_curve(curve_name):
curve_points.append((x, y))
self.wn.add_curve(curve_name, None, curve_points)
curve = self.wn.get_curve(curve_name)
- return curve
+ return curve # noqa: RET504
curve_name_list = self.wn.curve_name_list
- for name, curvedata in self.curves.items():
+ for name, curvedata in self.curves.items(): # noqa: B007, PERF102
if name not in curve_name_list or self.wn.get_curve(name) is None:
- warnings.warn(
+ warnings.warn( # noqa: B028
f'Not all curves were used in "{self.wn.name}"; added with type None, units conversion left to user'
)
logger.warning(
@@ -2900,14 +2900,14 @@ def create_curve(curve_name):
)
create_curve(name)
- def _write_end(self, f, wn):
+ def _write_end(self, f, wn): # noqa: ARG002
f.write('[END]\n'.encode(sys_default_enc))
class _EpanetRule:
- """contains the text for an EPANET rule"""
+ """contains the text for an EPANET rule""" # noqa: D400
- def __init__(self, ruleID, inp_units=None, mass_units=None):
+ def __init__(self, ruleID, inp_units=None, mass_units=None): # noqa: N803
self.inp_units = inp_units
self.mass_units = mass_units
self.ruleID = ruleID
@@ -2917,23 +2917,23 @@ def __init__(self, ruleID, inp_units=None, mass_units=None):
self.priority = 0
@classmethod
- def parse_rules_lines(
+ def parse_rules_lines( # noqa: C901
cls,
lines,
flow_units=FlowUnits.SI,
mass_units=MassUnits.mg,
) -> list:
- rules = list()
+ rules = list() # noqa: C408
rule = None
in_if = False
in_then = False
in_else = False
- new_lines = list()
- new_line = list()
+ new_lines = list() # noqa: C408
+ new_line = list() # noqa: C408
for line in lines:
if isinstance(line, (tuple, list)):
- line = line[1]
- line = line.split(';')[0]
+ line = line[1] # noqa: PLW2901
+ line = line.split(';')[0] # noqa: PLW2901
words = line.strip().split()
for word in words:
if word.upper() in [
@@ -2948,7 +2948,7 @@ def parse_rules_lines(
if len(new_line) > 0:
text = ' '.join(new_line)
new_lines.append(text)
- new_line = list()
+ new_line = list() # noqa: C408
new_line.append(word)
if len(new_line) > 0:
text = ' '.join(new_line)
@@ -3000,60 +3000,60 @@ def parse_rules_lines(
return rules
def from_if_then_else(self, control):
- """Create a rule from a Rule object"""
+ """Create a rule from a Rule object""" # noqa: D400
if isinstance(control, Rule):
self.ruleID = control.name
- self.add_control_condition(control._condition)
- for ct, action in enumerate(control._then_actions):
+ self.add_control_condition(control._condition) # noqa: SLF001
+ for ct, action in enumerate(control._then_actions): # noqa: SLF001
if ct == 0:
self.add_action_on_true(action)
else:
self.add_action_on_true(action, ' AND')
- for ct, action in enumerate(control._else_actions):
+ for ct, action in enumerate(control._else_actions): # noqa: SLF001
if ct == 0:
self.add_action_on_false(action)
else:
self.add_action_on_false(action, ' AND')
- self.set_priority(control._priority)
+ self.set_priority(control._priority) # noqa: SLF001
else:
- raise ValueError(
- 'Invalid control type for rules: %s' % control.__class__.__name__
+ raise ValueError( # noqa: TRY004
+ 'Invalid control type for rules: %s' % control.__class__.__name__ # noqa: UP031
)
def add_if(self, clause):
- """Add an "if/and/or" clause from an INP file"""
+ """Add an "if/and/or" clause from an INP file""" # noqa: D400
self._if_clauses.append(clause)
- def add_control_condition(self, condition, prefix=' IF'):
- """Add a ControlCondition from an IfThenElseControl"""
+ def add_control_condition(self, condition, prefix=' IF'): # noqa: C901
+ """Add a ControlCondition from an IfThenElseControl""" # noqa: D400
if isinstance(condition, OrCondition):
- self.add_control_condition(condition._condition_1, prefix)
- self.add_control_condition(condition._condition_2, ' OR')
+ self.add_control_condition(condition._condition_1, prefix) # noqa: SLF001
+ self.add_control_condition(condition._condition_2, ' OR') # noqa: SLF001
elif isinstance(condition, AndCondition):
- self.add_control_condition(condition._condition_1, prefix)
- self.add_control_condition(condition._condition_2, ' AND')
+ self.add_control_condition(condition._condition_1, prefix) # noqa: SLF001
+ self.add_control_condition(condition._condition_2, ' AND') # noqa: SLF001
elif isinstance(condition, TimeOfDayCondition):
fmt = '{} SYSTEM CLOCKTIME {} {}'
clause = fmt.format(
prefix,
- condition._relation.text,
- condition._sec_to_clock(condition._threshold),
+ condition._relation.text, # noqa: SLF001
+ condition._sec_to_clock(condition._threshold), # noqa: SLF001
)
self.add_if(clause)
elif isinstance(condition, SimTimeCondition):
fmt = '{} SYSTEM TIME {} {}'
clause = fmt.format(
prefix,
- condition._relation.text,
- condition._sec_to_hours_min_sec(condition._threshold),
+ condition._relation.text, # noqa: SLF001
+ condition._sec_to_hours_min_sec(condition._threshold), # noqa: SLF001
)
self.add_if(clause)
elif isinstance(condition, ValueCondition):
fmt = (
'{} {} {} {} {} {}' # CONJ, TYPE, ID, ATTRIBUTE, RELATION, THRESHOLD
)
- attr = condition._source_attr
- val_si = condition._repr_value(attr, condition._threshold)
+ attr = condition._source_attr # noqa: SLF001
+ val_si = condition._repr_value(attr, condition._threshold) # noqa: SLF001
if attr.lower() == 'demand':
value = f'{from_si(self.inp_units, val_si, HydParam.Demand):.6g}'
elif attr.lower() in ['head', 'level']:
@@ -3065,14 +3065,14 @@ def add_control_condition(self, condition, prefix=' IF'):
elif attr.lower() == 'pressure':
value = f'{from_si(self.inp_units, val_si, HydParam.Pressure):.6g}'
elif attr.lower() == 'setting':
- if isinstance(condition._source_obj, Valve):
- if condition._source_obj.valve_type.upper() in [
+ if isinstance(condition._source_obj, Valve): # noqa: SLF001
+ if condition._source_obj.valve_type.upper() in [ # noqa: SLF001
'PRV',
'PBV',
'PSV',
]:
value = from_si(self.inp_units, val_si, HydParam.Pressure)
- elif condition._source_obj.valve_type.upper() == 'FCV':
+ elif condition._source_obj.valve_type.upper() == 'FCV': # noqa: SLF001
value = from_si(self.inp_units, val_si, HydParam.Flow)
else:
value = val_si
@@ -3081,34 +3081,34 @@ def add_control_condition(self, condition, prefix=' IF'):
value = f'{value:.6g}'
else: # status
value = val_si
- if isinstance(condition._source_obj, Valve):
+ if isinstance(condition._source_obj, Valve): # noqa: SLF001
cls = 'Valve'
- elif isinstance(condition._source_obj, Pump):
+ elif isinstance(condition._source_obj, Pump): # noqa: SLF001
cls = 'Pump'
else:
- cls = condition._source_obj.__class__.__name__
+ cls = condition._source_obj.__class__.__name__ # noqa: SLF001
clause = fmt.format(
prefix,
cls,
- condition._source_obj.name,
- condition._source_attr,
- condition._relation.symbol,
+ condition._source_obj.name, # noqa: SLF001
+ condition._source_attr, # noqa: SLF001
+ condition._relation.symbol, # noqa: SLF001
value,
)
self.add_if(clause)
else:
- raise ValueError('Unknown ControlCondition for EPANET Rules')
+ raise ValueError('Unknown ControlCondition for EPANET Rules') # noqa: EM101, TRY003, TRY004
def add_then(self, clause):
- """Add a "then/and" clause from an INP file"""
+ """Add a "then/and" clause from an INP file""" # noqa: D400
self._then_clauses.append(clause)
- def add_action_on_true(self, action, prefix=' THEN'):
- """Add a "then" action from an IfThenElseControl"""
+ def add_action_on_true(self, action, prefix=' THEN'): # noqa: C901
+ """Add a "then" action from an IfThenElseControl""" # noqa: D400
if isinstance(action, ControlAction):
fmt = '{} {} {} {} = {}'
- attr = action._attribute
- val_si = action._repr_value()
+ attr = action._attribute # noqa: SLF001
+ val_si = action._repr_value() # noqa: SLF001
if attr.lower() == 'demand':
value = f'{from_si(self.inp_units, val_si, HydParam.Demand):.6g}'
elif attr.lower() in ['head', 'level']:
@@ -3148,15 +3148,15 @@ def add_action_on_true(self, action, prefix=' THEN'):
self.add_then(clause)
def add_else(self, clause):
- """Add an "else/and" clause from an INP file"""
+ """Add an "else/and" clause from an INP file""" # noqa: D400
self._else_clauses.append(clause)
- def add_action_on_false(self, action, prefix=' ELSE'):
- """Add an "else" action from an IfThenElseControl"""
+ def add_action_on_false(self, action, prefix=' ELSE'): # noqa: C901
+ """Add an "else" action from an IfThenElseControl""" # noqa: D400
if isinstance(action, ControlAction):
fmt = '{} {} {} {} = {}'
- attr = action._attribute
- val_si = action._repr_value()
+ attr = action._attribute # noqa: SLF001
+ val_si = action._repr_value() # noqa: SLF001
if attr.lower() == 'demand':
value = f'{from_si(self.inp_units, val_si, HydParam.Demand):.6g}'
elif attr.lower() in ['head', 'level']:
@@ -3208,7 +3208,7 @@ def __str__(self):
'\n'.join(self._else_clauses),
self.priority,
)
- else:
+ else: # noqa: RET505
return 'RULE {}\n{}\n{}\n PRIORITY {}\n ; end of rule\n'.format(
self.ruleID,
'\n'.join(self._if_clauses),
@@ -3229,14 +3229,14 @@ def __str__(self):
'\n'.join(self._then_clauses),
)
- def generate_control(self, model):
+ def generate_control(self, model): # noqa: C901
condition_list = []
for line in self._if_clauses:
condition = None
words = line.split()
if words[1].upper() == 'SYSTEM':
if words[2].upper() == 'DEMAND':
- # TODO: system demand
+ # TODO: system demand # noqa: TD002
pass
elif words[2].upper() == 'TIME':
condition = SimTimeCondition(
@@ -3248,7 +3248,7 @@ def generate_control(self, model):
)
else:
attr = words[3].lower()
- value = ValueCondition._parse_value(words[5])
+ value = ValueCondition._parse_value(words[5]) # noqa: SLF001
if attr.lower() == 'demand':
value = to_si(self.inp_units, value, HydParam.Demand)
elif attr.lower() == 'head' or attr.lower() == 'level':
@@ -3260,7 +3260,7 @@ def generate_control(self, model):
elif attr.lower() == 'setting':
link = model.get_link(words[2])
if isinstance(link, wntrfr.network.Pump):
- value = value
+ value = value # noqa: PLW0127
elif isinstance(link, wntrfr.network.Valve):
if link.valve_type.upper() in ['PRV', 'PBV', 'PSV']:
value = to_si(self.inp_units, value, HydParam.Pressure)
@@ -3281,7 +3281,7 @@ def generate_control(self, model):
value,
)
else:
- # FIXME: raise error
+ # FIXME: raise error # noqa: FIX001, TD001, TD002
pass
if words[0].upper() == 'IF' or words[0].upper() == 'AND':
condition_list.append(condition)
@@ -3290,7 +3290,7 @@ def generate_control(self, model):
other = condition_list[-1]
condition_list.remove(other)
else:
- # FIXME: raise error
+ # FIXME: raise error # noqa: FIX001, TD001, TD002
pass
conj = OrCondition(other, condition)
condition_list.append(conj)
@@ -3303,12 +3303,12 @@ def generate_control(self, model):
then_acts = []
for act in self._then_clauses:
words = act.strip().split()
- if len(words) < 6:
- # TODO: raise error
+ if len(words) < 6: # noqa: PLR2004
+ # TODO: raise error # noqa: TD002
pass
link = model.get_link(words[2])
attr = words[3].lower()
- value = ValueCondition._parse_value(words[5])
+ value = ValueCondition._parse_value(words[5]) # noqa: SLF001
if attr.lower() == 'demand':
value = to_si(self.inp_units, value, HydParam.Demand)
elif attr.lower() in ['head', 'level']:
@@ -3327,12 +3327,12 @@ def generate_control(self, model):
else_acts = []
for act in self._else_clauses:
words = act.strip().split()
- if len(words) < 6:
- # TODO: raise error
+ if len(words) < 6: # noqa: PLR2004
+ # TODO: raise error # noqa: TD002
pass
link = model.get_link(words[2])
attr = words[3].lower()
- value = ValueCondition._parse_value(words[5])
+ value = ValueCondition._parse_value(words[5]) # noqa: SLF001
if attr.lower() == 'demand':
value = to_si(self.inp_units, value, HydParam.Demand)
elif attr.lower() in ['head', 'level']:
@@ -3392,10 +3392,10 @@ class BinFile:
def __init__(
self,
result_types=None,
- network=False,
- energy=False,
- statistics=False,
- convert_status=True,
+ network=False, # noqa: FBT002
+ energy=False, # noqa: FBT002
+ statistics=False, # noqa: FBT002
+ convert_status=True, # noqa: FBT002
):
if os.name in ['nt', 'dos'] or sys.platform == 'darwin':
self.ftype = '=f4'
@@ -3491,12 +3491,12 @@ def finalize_save(self, good_read, sim_warnings):
"""
# @run_lineprofile()
- def read(
+ def read( # noqa: C901, PLR0915
self,
filename,
- convergence_error=False,
- darcy_weisbach=False,
- convert=True,
+ convergence_error=False, # noqa: FBT002
+ darcy_weisbach=False, # noqa: FBT002
+ convert=True, # noqa: FBT002
):
"""Read a binary file and create a results object.
@@ -3520,9 +3520,9 @@ def read(
logger.debug('Read binary EPANET data from %s', filename)
dt_str = 'u1' # .format(self.idlen)
- with open(filename, 'rb') as fin:
+ with open(filename, 'rb') as fin: # noqa: PTH123
ftype = self.ftype
- idlen = self.idlen
+ idlen = self.idlen # noqa: F841
logger.debug('... read prolog information ...')
prolog = np.fromfile(fin, dtype=np.int32, count=15)
magic1 = prolog[0]
@@ -3612,18 +3612,18 @@ def read(
]
self.node_names = np.array(nodenames)
self.link_names = np.array(linknames)
- linkstart = np.array(
+ linkstart = np.array( # noqa: F841
np.fromfile(fin, dtype=np.int32, count=nlinks), dtype=int
)
- linkend = np.array(
+ linkend = np.array( # noqa: F841
np.fromfile(fin, dtype=np.int32, count=nlinks), dtype=int
)
linktype = np.fromfile(fin, dtype=np.int32, count=nlinks)
- tankidxs = np.fromfile(fin, dtype=np.int32, count=ntanks)
- tankarea = np.fromfile(fin, dtype=np.dtype(ftype), count=ntanks)
- elevation = np.fromfile(fin, dtype=np.dtype(ftype), count=nnodes)
- linklen = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks)
- diameter = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks)
+ tankidxs = np.fromfile(fin, dtype=np.int32, count=ntanks) # noqa: F841
+ tankarea = np.fromfile(fin, dtype=np.dtype(ftype), count=ntanks) # noqa: F841
+ elevation = np.fromfile(fin, dtype=np.dtype(ftype), count=nnodes) # noqa: F841
+ linklen = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks) # noqa: F841
+ diameter = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks) # noqa: F841
"""
self.save_network_desc_line('link_start', linkstart)
self.save_network_desc_line('link_end', linkend)
@@ -3635,7 +3635,7 @@ def read(
self.save_network_desc_line('link_diameter', diameter)
"""
logger.debug('... read energy data ...')
- for i in range(npumps):
+ for i in range(npumps): # noqa: B007
pidx = int(np.fromfile(fin, dtype=np.int32, count=1))
energy = np.fromfile(fin, dtype=np.dtype(ftype), count=6)
self.save_energy_line(pidx, linknames[pidx - 1], energy)
@@ -3649,7 +3649,7 @@ def read(
reportstep,
)
nrptsteps = len(reporttimes)
- statsN = nrptsteps
+ statsN = nrptsteps # noqa: N806, F841
if statsflag in [
StatisticsType.Maximum,
StatisticsType.Minimum,
@@ -3732,14 +3732,14 @@ def read(
count=(4 * nnodes + 8 * nlinks) * nrptsteps,
)
except Exception as e:
- logger.exception('Failed to process file: %s', e)
+ logger.exception('Failed to process file: %s', e) # noqa: TRY401
- N = int(np.floor(len(data) / (4 * nnodes + 8 * nlinks)))
+ N = int(np.floor(len(data) / (4 * nnodes + 8 * nlinks))) # noqa: N806
if nrptsteps > N:
t = reporttimes[N]
if convergence_error:
logger.error(
- 'Simulation did not converge at time '
+ 'Simulation did not converge at time ' # noqa: G003
+ self._get_time(t)
+ '.'
)
@@ -3748,11 +3748,11 @@ def read(
+ self._get_time(t)
+ '.'
)
- else:
+ else: # noqa: RET506
data = data[0 : N * (4 * nnodes + 8 * nlinks)]
data = np.reshape(data, (N, (4 * nnodes + 8 * nlinks)))
reporttimes = reporttimes[0:N]
- warnings.warn(
+ warnings.warn( # noqa: B028
'Simulation did not converge at time '
+ self._get_time(t)
+ '.'
@@ -3762,8 +3762,8 @@ def read(
data = np.reshape(data, (nrptsteps, (4 * nnodes + 8 * nlinks)))
self.results.error_code = None
- df = pd.DataFrame(data.transpose(), index=index, columns=reporttimes)
- df = df.transpose()
+ df = pd.DataFrame(data.transpose(), index=index, columns=reporttimes) # noqa: PD901
+ df = df.transpose() # noqa: PD901
self.results.node = {}
self.results.link = {}
@@ -3771,31 +3771,31 @@ def read(
if convert:
# Node Results
- self.results.node['demand'] = HydParam.Demand._to_si(
+ self.results.node['demand'] = HydParam.Demand._to_si( # noqa: SLF001
self.flow_units, df['demand']
)
- self.results.node['head'] = HydParam.HydraulicHead._to_si(
+ self.results.node['head'] = HydParam.HydraulicHead._to_si( # noqa: SLF001
self.flow_units, df['head']
)
- self.results.node['pressure'] = HydParam.Pressure._to_si(
+ self.results.node['pressure'] = HydParam.Pressure._to_si( # noqa: SLF001
self.flow_units, df['pressure']
)
# Water Quality Results (node and link)
if self.quality_type is QualType.Chem:
- self.results.node['quality'] = QualParam.Concentration._to_si(
+ self.results.node['quality'] = QualParam.Concentration._to_si( # noqa: SLF001
self.flow_units, df['quality'], mass_units=self.mass_units
)
- self.results.link['quality'] = QualParam.Concentration._to_si(
+ self.results.link['quality'] = QualParam.Concentration._to_si( # noqa: SLF001
self.flow_units,
df['linkquality'],
mass_units=self.mass_units,
)
elif self.quality_type is QualType.Age:
- self.results.node['quality'] = QualParam.WaterAge._to_si(
+ self.results.node['quality'] = QualParam.WaterAge._to_si( # noqa: SLF001
self.flow_units, df['quality'], mass_units=self.mass_units
)
- self.results.link['quality'] = QualParam.WaterAge._to_si(
+ self.results.link['quality'] = QualParam.WaterAge._to_si( # noqa: SLF001
self.flow_units,
df['linkquality'],
mass_units=self.mass_units,
@@ -3805,22 +3805,22 @@ def read(
self.results.link['quality'] = df['linkquality']
# Link Results
- self.results.link['flowrate'] = HydParam.Flow._to_si(
+ self.results.link['flowrate'] = HydParam.Flow._to_si( # noqa: SLF001
self.flow_units, df['flow']
)
- self.results.link['velocity'] = HydParam.Velocity._to_si(
+ self.results.link['velocity'] = HydParam.Velocity._to_si( # noqa: SLF001
self.flow_units, df['velocity']
)
headloss = np.array(df['headloss'])
- headloss[:, linktype < 2] = to_si(
+ headloss[:, linktype < 2] = to_si( # noqa: PLR2004
self.flow_units,
- headloss[:, linktype < 2],
+ headloss[:, linktype < 2], # noqa: PLR2004
HydParam.HeadLoss,
) # Pipe or CV
- headloss[:, linktype >= 2] = to_si(
+ headloss[:, linktype >= 2] = to_si( # noqa: PLR2004
self.flow_units,
- headloss[:, linktype >= 2],
+ headloss[:, linktype >= 2], # noqa: PLR2004
HydParam.Length,
) # Pump or Valve
self.results.link['headloss'] = pd.DataFrame(
@@ -3829,10 +3829,10 @@ def read(
status = np.array(df['linkstatus'])
if self.convert_status:
- status[status <= 2] = 0
- status[status == 3] = 1
- status[status >= 5] = 1
- status[status == 4] = 2
+ status[status <= 2] = 0 # noqa: PLR2004
+ status[status == 3] = 1 # noqa: PLR2004
+ status[status >= 5] = 1 # noqa: PLR2004
+ status[status == 4] = 2 # noqa: PLR2004
self.results.link['status'] = pd.DataFrame(
data=status, columns=linknames, index=reporttimes
)
@@ -3868,7 +3868,7 @@ def read(
)
self.results.link['friction_factor'] = df['frictionfactor']
- self.results.link['reaction_rate'] = QualParam.ReactionRate._to_si(
+ self.results.link['reaction_rate'] = QualParam.ReactionRate._to_si( # noqa: SLF001
self.flow_units, df['reactionrate'], self.mass_units
)
else:
@@ -3906,7 +3906,7 @@ def read(
return self.results
-class NoSectionError(Exception):
+class NoSectionError(Exception): # noqa: D101
pass
@@ -3916,8 +3916,8 @@ def __init__(self, f):
----------
f: str
- """
- self._f = open(f)
+ """ # noqa: D205
+ self._f = open(f) # noqa: SIM115, PTH123
self._num_lines = len(self._f.readlines())
self._end = self._f.tell()
self._f.seek(0)
@@ -3926,7 +3926,7 @@ def __init__(self, f):
def f(self):
return self._f
- def iter(self, start=0, stop=None, skip_section_headings=True):
+ def iter(self, start=0, stop=None, skip_section_headings=True): # noqa: FBT002
if stop is None:
stop = self._end
f = self.f
@@ -3957,12 +3957,12 @@ def get_section(self, sec):
end: int
The ending point in the file for sec
- """
+ """ # noqa: D205
start = None
end = None
in_sec = False
for loc, line in self.iter(0, None, skip_section_headings=False):
- line = line.split(';')[0]
+ line = line.split(';')[0] # noqa: PLW2901
if sec in line:
start = loc
in_sec = True
@@ -3982,10 +3982,10 @@ def contains_section(self, sec):
----------
sec: str
- """
+ """ # noqa: D205
try:
self.get_section(sec)
- return True
+ return True # noqa: TRY300
except NoSectionError:
return False
@@ -3999,19 +3999,19 @@ def _convert_line(line): # pragma: no cover
-------
list
- """
+ """ # noqa: D205
line = line.upper().split()
tmp = []
for i in line:
if '.' in i:
try:
tmp.append(float(i))
- except:
+ except: # noqa: E722
tmp.append(i)
else:
try:
tmp.append(int(i))
- except:
+ except: # noqa: E722
tmp.append(i)
return tmp
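
Note: `_convert_line` suppresses E722 on two bare `except:` clauses that only guard `float()`/`int()` parsing. A minimal per-token sketch of the targeted fix the rule points at, catching `ValueError` alone (the `coerce_token` name is illustrative, not part of the patch):

def coerce_token(tok):
    # float()/int() raise ValueError on non-numeric text; catching just
    # that keeps KeyboardInterrupt, MemoryError, etc. propagating, which
    # is what E722 is guarding against with bare excepts.
    caster = float if '.' in tok else int
    try:
        return caster(tok)
    except ValueError:
        return tok
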
@@ -4027,7 +4027,7 @@ def _compare_lines(line1, line2, tol=1e-14):
-------
bool
- """
+ """ # noqa: D205
if len(line1) != len(line2):
return False
@@ -4058,9 +4058,9 @@ def _clean_line(wn, sec, line): # pragma: no cover
-------
new_list: list of str
- """
+ """ # noqa: D205
if sec == '[JUNCTIONS]':
- if len(line) == 4:
+ if len(line) == 4: # noqa: PLR2004
other = wn.options.hydraulic.pattern
if other is None:
other = 1
@@ -4072,7 +4072,7 @@ def _clean_line(wn, sec, line): # pragma: no cover
return line
-def _read_control_line(line, wn, flow_units, control_name):
+def _read_control_line(line, wn, flow_units, control_name): # noqa: C901
"""Parameters
----------
line: str
@@ -4084,7 +4084,7 @@ def _read_control_line(line, wn, flow_units, control_name):
-------
control_obj: Control
- """
+ """ # noqa: D205
line = line.split(';')[0]
current = line.split()
if current == []:
@@ -4100,7 +4100,7 @@ def _read_control_line(line, wn, flow_units, control_name):
status = current[2].upper()
if (
- status == 'OPEN'
+ status == 'OPEN' # noqa: PLR1714
or status == 'OPENED'
or status == 'CLOSED'
or status == 'ACTIVE'
@@ -4113,7 +4113,7 @@ def _read_control_line(line, wn, flow_units, control_name):
)
elif isinstance(link, wntrfr.network.Valve):
if (
- link.valve_type == 'PRV'
+ link.valve_type == 'PRV' # noqa: PLR1714
or link.valve_type == 'PSV'
or link.valve_type == 'PBV'
):
@@ -4125,13 +4125,13 @@ def _read_control_line(line, wn, flow_units, control_name):
elif link.valve_type == 'GPV':
setting = current[2]
else:
- raise ValueError(
- f'Unrecognized valve type {link.valve_type} while parsing control {line}'
+ raise ValueError( # noqa: TRY003
+ f'Unrecognized valve type {link.valve_type} while parsing control {line}' # noqa: EM102
)
action_obj = wntrfr.network.ControlAction(link, 'setting', setting)
else:
raise RuntimeError(
- f'Links of type {type(link)} can only have controls that change\n'
+ f'Links of type {type(link)} can only have controls that change\n' # noqa: ISC003
+ f'the link status. Control: {line}'
)
@@ -4158,14 +4158,14 @@ def _read_control_line(line, wn, flow_units, control_name):
threshold = to_si(
flow_units, float(current[7]), HydParam.Pressure
) # + node.elevation
- control_obj = Control._conditional_control(
+ control_obj = Control._conditional_control( # noqa: SLF001
node, 'pressure', oper, threshold, action_obj, control_name
)
elif node.node_type == 'Tank':
threshold = to_si(
flow_units, float(current[7]), HydParam.HydraulicHead
) # + node.elevation
- control_obj = Control._conditional_control(
+ control_obj = Control._conditional_control( # noqa: SLF001
node, 'level', oper, threshold, action_obj, control_name
)
else:
@@ -4176,17 +4176,17 @@ def _read_control_line(line, wn, flow_units, control_name):
# control_name = control_name + '/' + str(round(threshold, 2))
elif 'CLOCKTIME' not in current: # at time
if 'TIME' not in current:
- raise ValueError(f'Unrecognized line in inp file: {line}')
+ raise ValueError(f'Unrecognized line in inp file: {line}') # noqa: EM102, TRY003
if ':' in current[5]:
run_at_time = int(_str_time_to_sec(current[5]))
else:
run_at_time = int(float(current[5]) * 3600)
- control_obj = Control._time_control(
+ control_obj = Control._time_control( # noqa: SLF001
wn,
run_at_time,
'SIM_TIME',
- False,
+ False, # noqa: FBT003
action_obj,
control_name,
)
@@ -4195,18 +4195,18 @@ def _read_control_line(line, wn, flow_units, control_name):
# control_name = control_name + '/' + current[i]
# control_name = control_name + '/' + str(run_at_time)
else: # at clocktime
- if len(current) < 7:
+ if len(current) < 7: # noqa: PLR2004
if ':' in current[5]:
run_at_time = int(_str_time_to_sec(current[5]))
else:
run_at_time = int(float(current[5]) * 3600)
else:
run_at_time = int(_clock_time_to_sec(current[5], current[6]))
- control_obj = Control._time_control(
+ control_obj = Control._time_control( # noqa: SLF001
wn,
run_at_time,
'CLOCK_TIME',
- True,
+ True, # noqa: FBT003
action_obj,
control_name,
)
@@ -4217,7 +4217,7 @@ def _read_control_line(line, wn, flow_units, control_name):
return control_obj
-def _diff_inp_files(
+def _diff_inp_files( # noqa: C901
file1,
file2=None,
float_tol=1e-8,
@@ -4232,7 +4232,7 @@ def _diff_inp_files(
max_diff_lines_per_section: int
htmldiff_file: str
- """
+ """ # noqa: D205
wn = InpFile().read(file1)
f1 = _InpFileDifferHelper(file1)
if file2 is None:
@@ -4247,7 +4247,7 @@ def _diff_inp_files(
for section in _INP_SECTIONS:
if not f1.contains_section(section):
if f2.contains_section(section):
- print(f'\tfile1 does not contain section {section} but file2 does.')
+ print(f'\tfile1 does not contain section {section} but file2 does.') # noqa: T201
continue
start1, stop1 = f1.get_section(section)
start2, stop2 = f2.get_section(section)
@@ -4296,33 +4296,33 @@ def _diff_inp_files(
assert len(different_lines_1) == len(different_lines_2)
n1 = 0
n2 = 0
- for loc1, line1 in new_lines_1:
+ for loc1, line1 in new_lines_1: # noqa: B007
different_lines_1.append(line1)
n1 += 1
- for loc2, line2 in new_lines_2:
+ for loc2, line2 in new_lines_2: # noqa: B007
different_lines_2.append(line2)
n2 += 1
if n1 > n2:
n = n1 - n2
- for i in range(n):
- different_lines_2.append('')
+ for i in range(n): # noqa: B007
+ different_lines_2.append('') # noqa: PERF401
elif n2 > n1:
n = n2 - n1
- for i in range(n):
- different_lines_1.append('')
+ for i in range(n): # noqa: B007
+ different_lines_1.append('') # noqa: PERF401
else:
- raise RuntimeError('Unexpected')
+ raise RuntimeError('Unexpected') # noqa: EM101
continue
section_line_counter = 0
f2_iter = iter(new_lines_2)
- for loc1, line1 in new_lines_1:
+ for loc1, line1 in new_lines_1: # noqa: B007
orig_line_1 = line1
loc2, line2 = next(f2_iter)
orig_line_2 = line2
- line1 = _convert_line(line1)
+ line1 = _convert_line(line1) # noqa: PLW2901
line2 = _convert_line(line2)
- line1 = _clean_line(wn, section, line1)
+ line1 = _clean_line(wn, section, line1) # noqa: PLW2901
line2 = _clean_line(wn, section, line2)
if not _compare_lines(line1, line2, tol=float_tol):
if section_line_counter < max_diff_lines_per_section:
@@ -4332,7 +4332,7 @@ def _diff_inp_files(
different_lines_1.append(orig_line_1)
different_lines_2.append(orig_line_2)
- if len(different_lines_1) < 200: # If lines < 200 use difflib
+ if len(different_lines_1) < 200: # If lines < 200 use difflib # noqa: PLR2004
differ = difflib.HtmlDiff()
html_diff = differ.make_file(different_lines_1, different_lines_2)
else: # otherwise, create a simple html file
@@ -4341,7 +4341,7 @@ def _diff_inp_files(
).transpose()
html_diff = differ_df.to_html()
- g = open(htmldiff_file, 'w')
+ g = open(htmldiff_file, 'w') # noqa: SIM115, PTH123
g.write(html_diff)
g.close()
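
Note: the SIM115/PTH123 suppressions above leave `_diff_inp_files` with a bare `open()`/`write()`/`close()` sequence for the HTML report. For reference, a minimal standard-library sketch of the un-suppressed idiom those rules point at (the `html_diff`/`htmldiff_file` names mirror the code above; the helper itself is not part of this patch):

from pathlib import Path

def write_html_diff(htmldiff_file: str, html_diff: str) -> None:
    # Path.write_text opens, writes, and closes the file in one call,
    # satisfying PTH123 (prefer pathlib over open()) and SIM115 (make
    # sure the handle is closed) without any suppression.
    Path(htmldiff_file).write_text(html_diff, encoding='utf-8')
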
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/toolkit.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/toolkit.py
index b0d4b9031..a2d5d265e 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/toolkit.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/epanet/toolkit.py
@@ -1,7 +1,7 @@
"""Created on Wed May 26 16:11:36 2021
@author: snaeimi
-"""
+""" # noqa: D400
import ctypes
import logging
@@ -16,39 +16,39 @@
logger = logging.getLogger(__name__)
-class EpanetException(Exception):
+class EpanetException(Exception): # noqa: N818, D101
pass
-class ENepanet(wntrfr.epanet.toolkit.ENepanet):
- def __init__(
+class ENepanet(wntrfr.epanet.toolkit.ENepanet): # noqa: D101
+ def __init__( # noqa: C901
self,
inpfile='',
rptfile='',
binfile='',
- changed_epanet=False,
+ changed_epanet=False, # noqa: FBT002
version=2.2,
):
- if changed_epanet == False or changed_epanet == True:
+ if changed_epanet == False or changed_epanet == True: # noqa: E712, PLR1714
self.changed_epanet = changed_epanet
else:
- raise ValueError('changed_epanet must be a boolean value')
+ raise ValueError('changed_epanet must be a boolean value') # noqa: EM101, TRY003
- if changed_epanet == False:
+ if changed_epanet == False: # noqa: E712
super().__init__(inpfile, rptfile, binfile, version=version)
else:
- try:
+ try: # noqa: SIM105
super().__init__(inpfile, rptfile, binfile, version=version)
- except:
+ except: # noqa: S110, E722
pass # to add robustness for the case when WNTR
# cannot load the unmodified DLLs for any reason
- if float(version) != 2.2:
- raise ValueError(
- 'EPANET version must be 2.2 when using the changed version'
+ if float(version) != 2.2: # noqa: PLR2004
+ raise ValueError( # noqa: TRY003
+ 'EPANET version must be 2.2 when using the changed version' # noqa: EM101
)
- elif float(version) == 2.2:
+ elif float(version) == 2.2: # noqa: RET506, PLR2004
libnames = ['epanet22_mod', 'epanet22_win32_mod']
if '64' in platform.machine():
libnames.insert(0, 'epanet22_amd64_mod')
@@ -57,39 +57,39 @@ def __init__(
if os.name in ['nt', 'dos']:
libepanet = resource_filename(
__name__,
- 'Windows/%s.dll' % lib,
+ 'Windows/%s.dll' % lib, # noqa: UP031
)
self.ENlib = ctypes.windll.LoadLibrary(libepanet)
elif sys.platform == 'darwin':
libepanet = resource_filename(
__name__,
- 'Darwin/lib%s.dylib' % lib,
+ 'Darwin/lib%s.dylib' % lib, # noqa: UP031
)
self.ENlib = ctypes.cdll.LoadLibrary(libepanet)
else:
libepanet = resource_filename(
__name__,
- 'Linux/lib%s.so' % lib,
+ 'Linux/lib%s.so' % lib, # noqa: UP031
)
self.ENlib = ctypes.cdll.LoadLibrary(libepanet)
- return
- except Exception as E1:
+ return # noqa: TRY300
+ except Exception as E1: # noqa: PERF203
if lib == libnames[-1]:
- raise E1
+ raise E1 # noqa: TRY201
finally:
- if version >= 2.2 and '32' not in lib:
+ if version >= 2.2 and '32' not in lib: # noqa: PLR2004
self._project = ctypes.c_uint64()
- elif version >= 2.2:
+ elif version >= 2.2: # noqa: PLR2004
self._project = ctypes.c_uint32()
else:
self._project = None
- def ENSetIgnoreFlag(self, ignore_flag=0):
- if abs(ignore_flag - np.round(ignore_flag)) > 0.00001 or ignore_flag < 0:
+ def ENSetIgnoreFlag(self, ignore_flag=0): # noqa: N802, D102
+ if abs(ignore_flag - np.round(ignore_flag)) > 0.00001 or ignore_flag < 0: # noqa: PLR2004
logger.error(
- 'ignore_flag must be int value and bigger than zero'
+ 'ignore_flag must be a non-negative integer: ' # noqa: G003
+ str(ignore_flag)
)
- flag = ctypes.c_int(int(ignore_flag))
+ flag = ctypes.c_int(int(ignore_flag)) # noqa: F841
# print('++++++++++++++++++++++')
# self.ENlib.ENEXTENDEDsetignoreflag(flag)
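
Note: the densest clusters in this file are E712 (`== True`/`== False`) and UP031 (`%`-interpolation). A small sketch of the shapes those rules prefer, assuming the patch intentionally keeps behavior identical and only suppresses (`validate_changed_epanet` and `library_filename` are illustrative names, not part of the codebase):

def validate_changed_epanet(changed_epanet):
    # isinstance() replaces the `changed_epanet == False or changed_epanet == True`
    # test that E712/PLR1714 flag in ENepanet.__init__ above.
    if not isinstance(changed_epanet, bool):
        raise TypeError('changed_epanet must be a boolean value')
    return changed_epanet

def library_filename(lib, system):
    # f-strings in place of the '%s' interpolation flagged by UP031; the
    # branches stand in for the os.name / sys.platform checks above.
    if system in ('nt', 'dos'):
        return f'Windows/{lib}.dll'
    if system == 'darwin':
        return f'Darwin/lib{lib}.dylib'
    return f'Linux/lib{lib}.so'
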
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/morph/link.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/morph/link.py
index 226e3d8df..584acb288 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/morph/link.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/morph/link.py
@@ -1,4 +1,4 @@
-"""The wntrfr.morph.link module contains functions to split/break pipes."""
+"""The wntrfr.morph.link module contains functions to split/break pipes.""" # noqa: INP001
import copy
import logging
@@ -13,9 +13,9 @@ def split_pipe(
pipe_name_to_split,
new_pipe_name,
new_junction_name,
- add_pipe_at_end=True,
+ add_pipe_at_end=True, # noqa: FBT002
split_at_point=0.5,
- return_copy=True,
+ return_copy=True, # noqa: FBT002
):
"""Split a pipe by adding a junction and one new pipe segment.
@@ -77,7 +77,7 @@ def split_pipe(
return_copy,
)
- return wn2
+ return wn2 # noqa: RET504
def break_pipe(
@@ -86,9 +86,9 @@ def break_pipe(
new_pipe_name,
new_junction_name_old_pipe,
new_junction_name_new_pipe,
- add_pipe_at_end=True,
+ add_pipe_at_end=True, # noqa: FBT002
split_at_point=0.5,
- return_copy=True,
+ return_copy=True, # noqa: FBT002
):
"""Break a pipe by adding a two unconnected junctions and one new pipe segment.
@@ -156,10 +156,10 @@ def break_pipe(
return_copy,
)
- return wn2
+ return wn2 # noqa: RET504
-def _split_or_break_pipe(
+def _split_or_break_pipe( # noqa: C901
wn,
pipe_name_to_split,
new_pipe_name,
@@ -178,9 +178,9 @@ def _split_or_break_pipe(
# Do sanity checks
if not isinstance(pipe, Pipe):
- raise ValueError('You can only split pipes.')
+ raise ValueError('You can only split pipes.') # noqa: EM101, TRY003, TRY004
if split_at_point < 0 or split_at_point > 1:
- raise ValueError('split_at_point must be between 0 and 1')
+ raise ValueError('split_at_point must be between 0 and 1') # noqa: EM101, TRY003
# Sina edited here
# node_list = [node_name for node_name, node in wn2.nodes()]
# link_list = [link_name for link_name, link in wn2.links()]
@@ -222,7 +222,7 @@ def _split_or_break_pipe(
elevation=junction_elevation,
coordinates=junction_coordinates,
)
- if len(new_junction_names) == 2:
+ if len(new_junction_names) == 2: # noqa: PLR2004
wn2.add_junction(
new_junction_names[1],
base_demand=0.0,
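
Note: FBT002 fires on the boolean positional defaults in `split_pipe` and `break_pipe`; the patch suppresses rather than change a public signature, which is the conservative choice. For reference, a signature-only sketch of the keyword-only form the rule nudges toward (adopting it would break positional callers):

def split_pipe(
    wn,
    pipe_name_to_split,
    new_pipe_name,
    new_junction_name,
    *,  # everything after this marker must be passed by keyword,
        # which is the usual resolution for FBT002 boolean traps
    add_pipe_at_end=True,
    split_at_point=0.5,
    return_copy=True,
): ...

The RET504 hits (`return wn2` right after the assignment) are likewise stylistic: returning the `_split_or_break_pipe(...)` expression directly would silence them without changing behavior.
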
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/__init__.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/__init__.py
index e69de29bb..b74acee6d 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/__init__.py
@@ -0,0 +1 @@
+# noqa: N999, D104
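
Note: for the empty `__init__.py` files in this patch, D104 only asks for a package docstring, so a one-liner like the sketch below would resolve it outright (docstring text is illustrative); N999 genuinely needs the suppression, since it objects to the CamelCase directory names (EnhancedWNTR) fixed by the project layout.

"""Network subpackage of the EnhancedWNTR wrapper."""  # noqa: N999
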
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py
index cc41e6394..10d48d8e3 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/network/model.py
@@ -12,7 +12,7 @@
NodeRegistry
LinkRegistry
-"""
+""" # noqa: D205
import logging
import math
@@ -23,7 +23,7 @@
from wntrfr.network.base import LinkStatus
from wntrfr.network.elements import Pump, Valve
-from ..epanet.io import InpFile
+from ..epanet.io import InpFile # noqa: TID252
logger = logging.getLogger(__name__)
@@ -44,10 +44,10 @@ def __init__(self, inp_file_name=None):
self.breakage_link = {}
self.expicit_leak = []
- def updateWaterNetworkModelWithResult(
+ def updateWaterNetworkModelWithResult( # noqa: C901, N802
self,
result,
- registry,
+ registry, # noqa: ARG002
latest_simulation_time=None,
):
"""Updates Water Network Model consistent with result model. must be only
@@ -75,13 +75,13 @@ def updateWaterNetworkModelWithResult(
-------
None.
- """
+ """ # noqa: D205, D401
max_time = result.node['head'].index.max()
- if latest_simulation_time == None:
+ if latest_simulation_time == None: # noqa: E711
latest_simulation_time = max_time
elif latest_simulation_time != max_time:
- raise ValueError(
- 'Provided LATEST SIMULATION TIME id not consistent with the latest time in RESULT'
+ raise ValueError( # noqa: TRY003
+ 'Provided LATEST SIMULATION TIME is not consistent with the latest time in RESULT' # noqa: EM101
)
avilable_tank_name_list = set(self.tank_name_list).intersection(
@@ -94,7 +94,7 @@ def updateWaterNetworkModelWithResult(
head = None
cur_node = self.get_node(tank_name)
- if cur_node._is_isolated:
+ if cur_node._is_isolated: # noqa: SLF001
continue
head = result.node['head'].loc[max_time, tank_name]
@@ -107,12 +107,12 @@ def updateWaterNetworkModelWithResult(
tank_level = cur_node.max_level
cur_node.init_level = abs(tank_level)
- cur_node._head = cur_node.elevation + tank_level
+ cur_node._head = cur_node.elevation + tank_level # noqa: SLF001
if tank_level < 0.0:
- logger.error('head= ' + repr(head))
- logger.error('elevation= ' + repr(cur_node.elevation))
- logger.error('tank_level= ' + repr(tank_level))
+ logger.error('head= ' + repr(head)) # noqa: G003
+ logger.error('elevation= ' + repr(cur_node.elevation)) # noqa: G003
+ logger.error('tank_level= ' + repr(tank_level)) # noqa: G003
raise ValueError(
'Tank Level for ' + tank_name + ' is less than zero'
)
@@ -124,7 +124,7 @@ def updateWaterNetworkModelWithResult(
try:
setting = result.link['setting'].loc[max_time, link_name]
status = result.link['status'].loc[max_time, link_name]
- except:
+ except: # noqa: S112, E722
# logger.error(link_name + ' exist in WaterNetwork but does not exist in result')
# raise ValueError(link_name + ' exist in WaterNetwork but does not exist in result')
continue
@@ -136,17 +136,17 @@ def updateWaterNetworkModelWithResult(
link.setting.base_value = float(setting)
if status == 0:
- link._user_status = LinkStatus.Closed
+ link._user_status = LinkStatus.Closed # noqa: SLF001
elif status == 1:
- link._user_status = LinkStatus.Open
+ link._user_status = LinkStatus.Open # noqa: SLF001
- elif status == 2:
- link._user_status = LinkStatus.Active
+ elif status == 2: # noqa: PLR2004
+ link._user_status = LinkStatus.Active # noqa: SLF001
else:
- logger.error('Element type is: ' + repr(type(link)))
- logger.error('Status is : ' + repr(status))
+ logger.error('Element type is: ' + repr(type(link))) # noqa: G003
+ logger.error('Status is : ' + repr(status)) # noqa: G003
def read_inpfile(self, filename):
"""Defines water network model components from an EPANET INP file
@@ -156,7 +156,7 @@ def read_inpfile(self, filename):
filename : string
Name of the INP file.
- """
+ """ # noqa: D400, D401
inpfile = InpFile()
inpfile.read(filename, wn=self)
self._inpfile = inpfile
@@ -171,7 +171,7 @@ def write_inpfile(self, filename, units=None):
units : str, int or FlowUnits
Name of the units being written to the inp file.
- """
+ """ # noqa: D400, D401
if self._inpfile is None:
logger.warning(
'Writing a minimal INP file without saved non-WNTR options (energy, etc.)'
@@ -181,15 +181,15 @@ def write_inpfile(self, filename, units=None):
units = self._options.hydraulic.en2_units
self._inpfile.write(filename, self, units=units)
- def implicitLeakToExplicitEMitter(self, registry):
+ def implicitLeakToExplicitEMitter(self, registry): # noqa: N802, D102
if len(self.expicit_leak) > 0:
- raise ValueError('Explicit leak is not reset')
+ raise ValueError('Explicit leak is not reset') # noqa: EM101, TRY003
registry.active_pipe_damages = OrderedDict()
for node_name in self.node_name_list:
node = self.get_node(node_name)
- if node._leak:
+ if node._leak: # noqa: SLF001
if node_name in self.expicit_leak:
raise ValueError(
'The node name is already in leak memory: ' + node_name
@@ -217,9 +217,9 @@ def implicitLeakToExplicitEMitter(self, registry):
cd = cd / (0.145038**0.5) # (gpm/(Psi^0.5))
# When writing to emitter, function from_si changes m^3ps to GPM
- new_node._emitter_coefficient = cd
+ new_node._emitter_coefficient = cd # noqa: SLF001
- if node.demand_timeseries_list[0].base_value > 0.001:
+ if node.demand_timeseries_list[0].base_value > 0.001: # noqa: PLR2004
raise ValueError('leak node has demand: ' + node_name)
temp = {
'node_name': node_name,
@@ -232,14 +232,14 @@ def implicitLeakToExplicitEMitter(self, registry):
registry.explicit_leak_node[node_name] = new_node_name
registry.active_pipe_damages.update({new_node_name: node_name})
- def implicitLeakToExplicitReservoir(self, registry):
+ def implicitLeakToExplicitReservoir(self, registry): # noqa: N802, D102
if len(self.expicit_leak) > 0:
- raise ValueError('Explicit leak is not reset')
+ raise ValueError('Explicit leak is not reset') # noqa: EM101, TRY003
registry.active_pipe_damages = OrderedDict()
for node_name in self.node_name_list:
node = self.get_node(node_name)
- if node._leak:
+ if node._leak: # noqa: SLF001
if node_name in self.expicit_leak:
raise ValueError(
'The node name is already in leak memory: ' + node_name
@@ -264,7 +264,7 @@ def implicitLeakToExplicitReservoir(self, registry):
check_valve=True,
)
- if node.demand_timeseries_list[0].base_value > 0.001:
+ if node.demand_timeseries_list[0].base_value > 0.001: # noqa: PLR2004
raise ValueError('leak node has demand: ' + node_name)
temp = {
'node_name': node_name,
@@ -276,20 +276,20 @@ def implicitLeakToExplicitReservoir(self, registry):
registry.explicit_leak_node[node_name] = new_node_name
registry.active_pipe_damages.update({new_node_name: node_name})
- def resetExplicitLeak(self):
+ def resetExplicitLeak(self): # noqa: N802, D102
for data in self.expicit_leak:
new_pipe_name = data['element1']
new_node_name = data['element2']
self.remove_link(new_pipe_name, force=True)
- self.get_node(new_node_name)._emitter_coefficient = None
+ self.get_node(new_node_name)._emitter_coefficient = None # noqa: SLF001
self.remove_node(new_node_name, force=True)
self.expicit_leak = []
- def linkBreackage(self, registry):
+ def linkBreackage(self, registry): # noqa: N802, D102
if len(self.breakage_link) > 0:
- raise ValueError('Breakckage is not unliked')
+ raise ValueError('Breakage is not unlinked') # noqa: EM101, TRY003
self.breakage_link = {}
pipe_damage_table = registry.getDamageData('PIPE')
@@ -297,20 +297,20 @@ def linkBreackage(self, registry):
pipe_damage_table['damage_type'] == 'break'
]
- for damage_node, row in broken_pipe_damage_table.iterrows():
- if registry.getPipeDamageAttribute('repair', damage_node) == True:
+ for damage_node, row in broken_pipe_damage_table.iterrows(): # noqa: B007
+ if registry.getPipeDamageAttribute('repair', damage_node) == True: # noqa: E712
continue
- pipe_A, pipe_B, orginal_pipe, node_A, node_B = registry.getBreakData(
+ pipe_A, pipe_B, orginal_pipe, node_A, node_B = registry.getBreakData( # noqa: N806
damage_node
)
pipe_name_list = self.pipe_name_list
junction_name_list = self.junction_name_list
- iPipe_A_in = pipe_A in pipe_name_list
- iPipe_B_in = pipe_B in pipe_name_list
- iNode_A_in = node_A in junction_name_list
- iNode_B_in = node_B in junction_name_list
+ iPipe_A_in = pipe_A in pipe_name_list # noqa: N806
+ iPipe_B_in = pipe_B in pipe_name_list # noqa: N806
+ iNode_A_in = node_A in junction_name_list # noqa: N806
+ iNode_B_in = node_B in junction_name_list # noqa: N806
if not iPipe_A_in or not iPipe_B_in or not iNode_A_in or not iNode_B_in:
if iPipe_A_in or iPipe_B_in or iNode_A_in or iNode_B_in:
@@ -341,8 +341,8 @@ def linkBreackage(self, registry):
)
self.breakage_link[damage_node] = new_pipe_name
- def unlinkBreackage(self):
- for damage_node, link_pipe_name in self.breakage_link.items():
+ def unlinkBreackage(self): # noqa: N802, D102
+ for damage_node, link_pipe_name in self.breakage_link.items(): # noqa: B007, PERF102
self.remove_link(link_pipe_name, force=True)
self.breakage_link = {}
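
Note: most G003 hits in `model.py` come from building log messages with `+`. A minimal sketch of the lazy `%`-style formatting the rule prefers, which also skips the string work entirely when the log level is disabled (function and argument names are illustrative):

import logging

logger = logging.getLogger(__name__)

def log_tank_state(head, elevation, tank_level):
    # Passing values as arguments defers formatting to the logging
    # framework, the fix G003 suggests for 'head= ' + repr(head) above.
    logger.error('head= %r', head)
    logger.error('elevation= %r', elevation)
    logger.error('tank_level= %r', tank_level)

The E711 hit has an equally small fix: `if latest_simulation_time is None:` instead of `== None`.
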
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/__init__.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/__init__.py
index e69de29bb..b74acee6d 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/__init__.py
@@ -0,0 +1 @@
+# noqa: N999, D104
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py
index 3c07e2f52..49008fad1 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/epanet.py
@@ -1,7 +1,7 @@
"""Created on Tue Jun 1 17:09:25 2021
@author: snaeimi
-"""
+""" # noqa: D400
import itertools
import logging
@@ -19,7 +19,7 @@
from wntrfr.sim.network_isolation import check_for_isolated_junctions, get_long_size
from wntrfr.utils.ordered_set import OrderedSet
-from ..epanet import toolkit
+from ..epanet import toolkit # noqa: TID252
logger = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ class EpanetSimulator(EpanetSimulator):
"""
def __init__(self, wn):
- super(EpanetSimulator, self).__init__(wn)
+ super(EpanetSimulator, self).__init__(wn) # noqa: UP008
# Sina added this for time manipulate function
@@ -68,10 +68,10 @@ def __init__(self, wn):
# Sina added this for isolation init
long_size = get_long_size()
- if long_size == 4:
+ if long_size == 4: # noqa: PLR2004
self._int_dtype = np.int32
else:
- assert long_size == 8
+ assert long_size == 8 # noqa: PLR2004
self._int_dtype = np.int64
self._link_name_to_id = OrderedDict()
self._link_id_to_name = OrderedDict()
@@ -80,11 +80,11 @@ def __init__(self, wn):
self._initialize_name_id_maps()
# sina end
- def manipulateTimeOrder(
+ def manipulateTimeOrder( # noqa: N802, D102
self,
begin_time,
end_time,
- change_time_step=False,
+ change_time_step=False, # noqa: FBT002
min_correction_time_step=None,
):
time_dif = end_time - begin_time
@@ -95,9 +95,9 @@ def manipulateTimeOrder(
self._wn.options.time.pattern_start = begin_time
self._wn.options.time.start_clocktime = begin_time
if change_time_step:
- if min_correction_time_step == None:
- raise ValueError(
- 'if change_time_step is True, then min_correction_time_step must be provided'
+ if min_correction_time_step == None: # noqa: E711
+ raise ValueError( # noqa: TRY003
+ 'if change_time_step is True, then min_correction_time_step must be provided' # noqa: EM101
)
self._wn.options.time.hydraulic_timestep = (
@@ -109,34 +109,34 @@ def manipulateTimeOrder(
self._wn.options.time.report_timestep,
)
min_step_time = min(min_step_time, time_step)
- iFinished = False
+ iFinished = False # noqa: N806
i = 1
- logger.debug('time_dif= ' + repr(time_dif))
+ logger.debug('time_dif= ' + repr(time_dif)) # noqa: G003
time_step_list = list(range(min_step_time, time_step, min_step_time))
time_step_list.append(time_step)
while i <= len(time_step_list):
if time_dif % time_step_list[-i] == 0:
new_time_step = time_step_list[-i]
- iFinished = True
+ iFinished = True # noqa: N806
break
- elif i == len(time_step_list):
- raise ('There was no time check when creating time event?')
+ elif i == len(time_step_list): # noqa: RET508
+ raise ('There was no time check when creating time event?') # noqa: B016
i += 1
- if iFinished == False:
- raise RuntimeError('no timestep is found')
+ if iFinished == False: # noqa: E712
+ raise RuntimeError('no timestep is found') # noqa: EM101, TRY003
self._wn.options.time.report_timestep = new_time_step
- def run_sim(
+ def run_sim( # noqa: C901
self,
file_prefix='temp',
- save_hyd=False,
- use_hyd=False,
+ save_hyd=False, # noqa: FBT002
+ use_hyd=False, # noqa: FBT002
hydfile=None,
version=2.2,
- convergence_error=False,
+ convergence_error=False, # noqa: FBT002, ARG002
start_time=None,
- iModified=True,
+ iModified=True, # noqa: FBT002, N803
):
"""Run the EPANET simulator.
@@ -162,7 +162,7 @@ def run_sim(
i = 0
for solver_parameter in solver_parameters_list:
i += 1
- print(solver_parameter)
+ print(solver_parameter) # noqa: T201
self._wn.options.hydraulic.checkfreq = solver_parameter[0]
self._wn.options.hydraulic.maxcheck = solver_parameter[1]
self._wn.options.hydraulic.damplimit = solver_parameter[2]
@@ -176,7 +176,7 @@ def run_sim(
version=version,
)
- enData = toolkit.ENepanet(changed_epanet=iModified, version=version)
+ enData = toolkit.ENepanet(changed_epanet=iModified, version=version) # noqa: N806
rptfile = file_prefix + '.rpt'
outfile = file_prefix + '.bin'
if hydfile is None:
@@ -191,14 +191,14 @@ def run_sim(
except Exception as err:
enData.ENclose()
if err.args[0] == 'EPANET Error 110':
- print(enData.errcode)
+ print(enData.errcode) # noqa: T201
run_successful = False
if i < len(solver_parameters_list):
continue
- else:
- raise err
+ else: # noqa: RET507
+ raise err # noqa: TRY201
else:
- raise err
+ raise err # noqa: TRY201
else:
run_successful = True
logger.debug('Solved hydraulics')
@@ -213,7 +213,7 @@ def run_sim(
logger.debug('Ran quality')
except Exception as err:
enData.ENclose()
- raise err
+ raise err # noqa: TRY201
enData.ENclose()
logger.debug('Completed run')
result_data = self.reader.read(outfile)
@@ -231,12 +231,12 @@ def run_sim(
return result_data, run_successful
- def _updateResultStartTime(self, result_data, start_time):
- for res_type, res in result_data.link.items():
+ def _updateResultStartTime(self, result_data, start_time): # noqa: N802
+ for res_type, res in result_data.link.items(): # noqa: B007, PERF102
# result_data.link[res_type].index = res
res.index = res.index + start_time
- for res_type, res in result_data.node.items():
+ for res_type, res in result_data.node.items(): # noqa: B007, PERF102
# result_data.link[res_type].index = res
res.index = res.index + start_time
@@ -256,14 +256,14 @@ def _get_isolated_junctions_and_links(
for j in self._prev_isolated_junctions:
try:
junction = self._wn.get_node(j)
- junction._is_isolated = False
- except:
+ junction._is_isolated = False # noqa: SLF001
+ except: # noqa: S110, PERF203, E722
pass
- for l in self._prev_isolated_links:
+ for l in self._prev_isolated_links: # noqa: E741
try:
link = self._wn.get_link(l)
- link._is_isolated = False
- except:
+ link._is_isolated = False # noqa: SLF001
+ except: # noqa: S110, PERF203, E722
pass
node_indicator = np.ones(self._wn.num_nodes, dtype=self._int_dtype)
check_for_isolated_junctions(
@@ -283,29 +283,29 @@ def _get_isolated_junctions_and_links(
for j_id in isolated_junction_ids:
j = self._node_id_to_name[j_id]
junction = self._wn.get_node(j)
- junction._is_isolated = True
+ junction._is_isolated = True # noqa: SLF001
isolated_junctions.add(j)
connected_links = self._wn.get_links_for_node(j)
- for l in connected_links:
+ for l in connected_links: # noqa: E741
link = self._wn.get_link(l)
- link._is_isolated = True
+ link._is_isolated = True # noqa: SLF001
isolated_links.add(l)
if logger_level <= logging.DEBUG:
if len(isolated_junctions) > 0 or len(isolated_links) > 0:
- raise ValueError(f'isolated junctions: {isolated_junctions}')
+ raise ValueError(f'isolated junctions: {isolated_junctions}') # noqa: EM102, TRY003
logger.debug(f'isolated links: {isolated_links}')
self._prev_isolated_junctions = isolated_junctions
self._prev_isolated_links = isolated_links
return isolated_junctions, isolated_links
- def _initialize_internal_graph(self):
+ def _initialize_internal_graph(self): # noqa: C901
n_links = OrderedDict()
rows = []
cols = []
vals = []
- for link_name, link in itertools.chain(
+ for link_name, link in itertools.chain( # noqa: B007
self._wn.pipes(), self._wn.pumps(), self._wn.valves()
):
from_node_name = link.start_node_name
@@ -334,7 +334,7 @@ def _initialize_internal_graph(self):
vals.append(1)
elif link.link_type == 'Valve':
if (
- link.valve_type == 'PRV'
+ link.valve_type == 'PRV' # noqa: PLR1714
or link.valve_type == 'PSV'
or link.valve_type == 'FCV'
):
@@ -353,7 +353,7 @@ def _initialize_internal_graph(self):
self._internal_graph = scipy.sparse.csr_matrix((vals, (rows, cols)))
ndx_map = OrderedDict()
- for link_name, link in self._wn.links():
+ for link_name, link in self._wn.links(): # noqa: B007
from_node_name = link.start_node_name
to_node_name = link.end_node_name
from_node_id = self._node_name_to_id[from_node_name]
@@ -368,7 +368,7 @@ def _initialize_internal_graph(self):
self._map_link_to_internal_graph_data_ndx = ndx_map
self._number_of_connections = [0 for i in range(self._wn.num_nodes)]
- for node_id in self._node_id_to_name.keys():
+ for node_id in self._node_id_to_name.keys(): # noqa: SIM118
self._number_of_connections[node_id] = (
self._internal_graph.indptr[node_id + 1]
- self._internal_graph.indptr[node_id]
@@ -378,7 +378,7 @@ def _initialize_internal_graph(self):
)
self._node_pairs_with_multiple_links = OrderedDict()
- for from_node_id, to_node_id in n_links.keys():
+ for from_node_id, to_node_id in n_links.keys(): # noqa: SIM118
if n_links[(from_node_id, to_node_id)] > 1:
if (
to_node_id,
@@ -395,7 +395,7 @@ def _initialize_internal_graph(self):
for link_name in self._wn.get_links_for_node(from_node_name):
link = self._wn.get_link(link_name)
if (
- link.start_node_name == to_node_name
+ link.start_node_name == to_node_name # noqa: PLR1714
or link.end_node_name == to_node_name
):
tmp_list.append(link)
@@ -406,12 +406,12 @@ def _initialize_internal_graph(self):
self._source_ids = []
for node_name, node in self._wn.tanks():
- if node.init_level - node.min_level < 0.01:
+ if node.init_level - node.min_level < 0.01: # noqa: PLR2004
continue
node_id = self._node_name_to_id[node_name]
self._source_ids.append(node_id)
- for node_name, node in self._wn.reservoirs():
+ for node_name, node in self._wn.reservoirs(): # noqa: B007
connected_link_name_list = self._wn.get_links_for_node(
node_name
) # this is to exclude the reservoirs that are for leak only
@@ -429,8 +429,8 @@ def _initialize_internal_graph(self):
link.name
for link in out_going_pipe_list_name
if (
- (link.cv == False and link.initial_status != LinkStatus.Closed)
- or (link.cv == True and link.end_node_name != node_name)
+ (link.cv == False and link.initial_status != LinkStatus.Closed) # noqa: E712
+ or (link.cv == True and link.end_node_name != node_name) # noqa: E712
)
]
out_going_link_list_name.extend(out_going_pipe_list_name)
@@ -455,7 +455,7 @@ def _update_internal_graph(self):
data[ndx1] = 1
data[ndx2] = 1
- for key, link_list in self._node_pairs_with_multiple_links.items():
+ for key, link_list in self._node_pairs_with_multiple_links.items(): # noqa: B007, PERF102
first_link = link_list[0]
ndx1, ndx2 = ndx_map[first_link]
data[ndx1] = 0
@@ -468,17 +468,17 @@ def _update_internal_graph(self):
def _initialize_name_id_maps(self):
n = 0
- for link_name, link in self._wn.links():
+ for link_name, link in self._wn.links(): # noqa: B007
self._link_name_to_id[link_name] = n
self._link_id_to_name[n] = link_name
- n += 1
+ n += 1 # noqa: SIM113
n = 0
- for node_name, node in self._wn.nodes():
+ for node_name, node in self._wn.nodes(): # noqa: B007
self._node_name_to_id[node_name] = n
self._node_id_to_name[n] = node_name
n += 1
- def now_temp(
+ def now_temp( # noqa: D102
self,
rr,
isolated_link_list,
@@ -493,12 +493,12 @@ def now_temp(
and node_name not in already_done_nodes
]
junctions_pressure = (rr.node['pressure'][check_nodes]).iloc[-1]
- negative_junctions_pressure = junctions_pressure[(junctions_pressure < -10)]
+ negative_junctions_pressure = junctions_pressure[(junctions_pressure < -10)] # noqa: PLR2004
negative_junctions_pressure = negative_junctions_pressure.sort_values(
ascending=False
)
negative_junctions_name_list = negative_junctions_pressure.index.to_list()
- print('size= ' + repr(len(negative_junctions_name_list)))
+ print('size= ' + repr(len(negative_junctions_name_list))) # noqa: T201
pipes_to_be_closed = []
closed_pipes = []
@@ -524,7 +524,7 @@ def now_temp(
for checked_pipe in pipe_linked_to_node
if self._wn.get_link(checked_pipe).link_type == 'Pipe'
and checked_pipe not in isolated_link_list
- and self._wn.get_link(checked_pipe).cv == False
+ and self._wn.get_link(checked_pipe).cv == False # noqa: E712
and self._wn.get_link(checked_pipe).initial_status == 1
and self._wn.get_link(checked_pipe).start_node.node_type
== 'Junction'
@@ -539,7 +539,7 @@ def now_temp(
# pipe = self.wn.get_link(pipe_name)
flow = rr.link['flowrate'][pipe_name].iloc[-1]
- if abs(flow) > 0.01:
+ if abs(flow) > 0.01: # noqa: PLR2004
flag = True
# pipe.initial_status = LinkStatus(0)
closed_pipes.append(pipe_name)
@@ -551,7 +551,7 @@ def now_temp(
ifinish = True
return closed_pipes, already_done_nodes, ifinish
- def alterPipeKmNNN(
+ def alterPipeKmNNN( # noqa: N802, D102
self,
rr,
isolated_link_list,
@@ -583,7 +583,7 @@ def alterPipeKmNNN(
last_flow_row = rr.link['flowrate'].iloc[-1]
pipe_found = False
- while pipe_found == False:
+ while pipe_found == False: # noqa: E712
if len(negative_junctions_name_list) == 0:
ifinish = True
return closed_pipes, ifinish
@@ -609,7 +609,7 @@ def alterPipeKmNNN(
if len(abs_most_recent_flow_for_pipes) == 0:
negative_junctions_pressure.drop(
negative_junctions_name_list[-1],
- inplace=True,
+ inplace=True, # noqa: PD002
)
negative_junctions_name_list = (
negative_junctions_pressure.index.to_list()
@@ -627,10 +627,10 @@ def alterPipeKmNNN(
# n2 = pipe.end_node_name
# n1_pressure = rr.node['pressure'][n1].iloc[-1]
# n2_pressure = rr.node['pressure'][n2].iloc[-1]
- already_C = pipe.minor_loss
+ already_C = pipe.minor_loss # noqa: N806
# if already_C < 0.001:
# already_C = 1
- new_C = (1000 * 2 * 9.81 * (pipe.diameter**2 * math.pi / 4) ** 2) / (
+ new_C = (1000 * 2 * 9.81 * (pipe.diameter**2 * math.pi / 4) ** 2) / ( # noqa: N806
(biggest_flow_pipe_abs_flow) ** 2
) + already_C # the last of 100 is to magnify the c choosing
pipe.minor_loss = new_C
@@ -673,7 +673,7 @@ def alterPipeKmNNN(
# def check_pipes_sin(self, pipe_list):
# for pipe_name in pipe_list:
- def closePipeNNN(
+ def closePipeNNN( # noqa: N802, D102
self,
rr,
isolated_link_list,
@@ -703,7 +703,7 @@ def closePipeNNN(
last_flow_row = rr.link['flowrate'].iloc[-1]
pipe_found = False
- while pipe_found == False:
+ while pipe_found == False: # noqa: E712
if len(negative_junctions_name_list) == 0:
ifinish = True
return closed_pipes, ifinish
@@ -729,7 +729,7 @@ def closePipeNNN(
if len(abs_most_recent_flow_for_pipes) == 0:
negative_junctions_pressure.drop(
negative_junctions_name_list[-1],
- inplace=True,
+ inplace=True, # noqa: PD002
)
negative_junctions_name_list = (
negative_junctions_pressure.index.to_list()
@@ -741,10 +741,10 @@ def closePipeNNN(
ascending=False
)
biggest_flow_pipe_name = abs_most_recent_flow_for_pipes.index[0]
- biggest_flow_pipe_abs_flow = abs_most_recent_flow_for_pipes.iloc[0]
+ biggest_flow_pipe_abs_flow = abs_most_recent_flow_for_pipes.iloc[0] # noqa: F841
pipe = self._wn.get_link(biggest_flow_pipe_name)
- already_C = pipe.minor_loss
+ already_C = pipe.minor_loss # noqa: N806, F841
initial_status = pipe.initial_status
closed_pipes[biggest_flow_pipe_name] = initial_status
pipe.initial_status = LinkStatus.Closed
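
Note: `run_sim` re-raises with `raise err` inside its `except` blocks, which TRY201 flags because a bare `raise` already re-raises the active exception with its traceback intact. A compact sketch of that pattern in isolation (`run_once` and `attempts` are illustrative stand-ins for the solver-parameter retry loop above):

def run_with_retry(run_once, attempts):
    for i in range(attempts):
        try:
            return run_once()
        except Exception:
            if i == attempts - 1:
                raise  # bare raise keeps the original traceback (TRY201)
    return None
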
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py
index e8889baf0..d1d29a13a 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/io.py
@@ -128,10 +128,10 @@ def _is_number(s):
----------
s : anything
- """
+ """ # noqa: D400, D401
try:
float(s)
- return True
+ return True # noqa: TRY300
except ValueError:
return False
@@ -149,7 +149,7 @@ def _str_time_to_sec(s):
-------
Integer value of time in seconds.
- """
+ """ # noqa: D401
pattern1 = re.compile(r'^(\d+):(\d+):(\d+)$')
time_tuple = pattern1.search(s)
if bool(time_tuple):
@@ -158,7 +158,7 @@ def _str_time_to_sec(s):
+ int(time_tuple.groups()[1]) * 60
+ int(round(float(time_tuple.groups()[2])))
)
- else:
+ else: # noqa: RET505
pattern2 = re.compile(r'^(\d+):(\d+)$')
time_tuple = pattern2.search(s)
if bool(time_tuple):
@@ -166,16 +166,16 @@ def _str_time_to_sec(s):
int(time_tuple.groups()[0]) * 60 * 60
+ int(time_tuple.groups()[1]) * 60
)
- else:
+ else: # noqa: RET505
pattern3 = re.compile(r'^(\d+)$')
time_tuple = pattern3.search(s)
if bool(time_tuple):
return int(time_tuple.groups()[0]) * 60 * 60
- else:
- raise RuntimeError('Time format in ' 'INP file not recognized. ')
+ else: # noqa: RET505
+ raise RuntimeError('Time format in INP file not recognized.') # noqa: EM101, TRY003
-def _clock_time_to_sec(s, am_pm):
+def _clock_time_to_sec(s, am_pm): # noqa: C901
"""Converts EPANET clocktime format to seconds.
Parameters
@@ -191,13 +191,13 @@ def _clock_time_to_sec(s, am_pm):
-------
Integer value of time in seconds
- """
+ """ # noqa: D401
if am_pm.upper() == 'AM':
am = True
elif am_pm.upper() == 'PM':
am = False
else:
- raise RuntimeError('am_pm option not recognized; options are AM or PM')
+ raise RuntimeError('am_pm option not recognized; options are AM or PM') # noqa: EM101, TRY003
pattern1 = re.compile(r'^(\d+):(\d+):(\d+)$')
time_tuple = pattern1.search(s)
@@ -211,12 +211,12 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
+ else: # noqa: RET505
pattern2 = re.compile(r'^(\d+):(\d+)$')
time_tuple = pattern2.search(s)
if bool(time_tuple):
@@ -228,12 +228,12 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
+ else: # noqa: RET505
pattern3 = re.compile(r'^(\d+)$')
time_tuple = pattern3.search(s)
if bool(time_tuple):
@@ -242,13 +242,13 @@ def _clock_time_to_sec(s, am_pm):
time_sec -= 3600 * 12
if not am:
if time_sec >= 3600 * 12:
- raise RuntimeError(
- 'Cannot specify am/pm for times greater than 12:00:00'
+ raise RuntimeError( # noqa: TRY003
+ 'Cannot specify am/pm for times greater than 12:00:00' # noqa: EM101
)
time_sec += 3600 * 12
return time_sec
- else:
- raise RuntimeError('Time format in ' 'INP file not recognized. ')
+ else: # noqa: RET505
+ raise RuntimeError('Time format in INP file not recognized.') # noqa: EM101, TRY003
def _sec_to_string(sec):
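
Note: the three-deep `else` ladders in `_str_time_to_sec` and `_clock_time_to_sec` draw the RET505 hits; every branch ends in `return` or `raise`, so the ladder flattens into sequential early returns. A sketch of the flattened shape for `_str_time_to_sec` (same regexes and arithmetic; a reference shape, not the patch's change):

import re

def str_time_to_sec(s):
    # Each pattern either matches and returns, or control falls through
    # to the next one, so no else is needed after a return (RET505).
    m = re.search(r'^(\d+):(\d+):(\d+)$', s)
    if m:
        h, mi, sec = m.groups()
        return int(h) * 3600 + int(mi) * 60 + int(round(float(sec)))
    m = re.search(r'^(\d+):(\d+)$', s)
    if m:
        h, mi = m.groups()
        return int(h) * 3600 + int(mi) * 60
    m = re.search(r'^(\d+)$', s)
    if m:
        return int(m.groups()[0]) * 3600
    raise RuntimeError('Time format in INP file not recognized.')
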
@@ -281,7 +281,7 @@ def _write_junctions(self, f, wn):
for junction_name in nnames:
junction = wn.nodes[junction_name]
# sina added this
- if junction._is_isolated == True:
+ if junction._is_isolated == True: # noqa: SLF001, E712
continue
if junction.demand_timeseries_list:
base_demands = junction.demand_timeseries_list.base_demand_list()
@@ -300,7 +300,7 @@ def _write_junctions(self, f, wn):
else:
base_demand = 0.0
demand_pattern = None
- E = {
+ E = { # noqa: N806
'name': junction_name,
'elev': from_si(
self.flow_units, junction.elevation, HydParam.Elevation
@@ -322,9 +322,9 @@ def _write_reservoirs(self, f, wn):
for reservoir_name in nnames:
reservoir = wn.nodes[reservoir_name]
# sina added this
- if reservoir._is_isolated == True:
+ if reservoir._is_isolated == True: # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': reservoir_name,
'head': from_si(
self.flow_units,
@@ -358,9 +358,9 @@ def _write_tanks(self, f, wn):
# nnames.sort()
for tank_name in nnames:
tank = wn.nodes[tank_name]
- if tank._is_isolated == True: # sina added this
+ if tank._is_isolated == True: # sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': tank_name,
'elev': from_si(self.flow_units, tank.elevation, HydParam.Elevation),
'initlev': from_si(
@@ -402,9 +402,9 @@ def _write_pipes(self, f, wn):
# lnames.sort()
for pipe_name in lnames:
pipe = wn.links[pipe_name]
- if pipe._is_isolated == True: # Sina added this
+ if pipe._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': pipe_name,
'node1': pipe.start_node_name,
'node2': pipe.end_node_name,
@@ -431,9 +431,9 @@ def _write_pumps(self, f, wn):
# lnames.sort()
for pump_name in lnames:
pump = wn.links[pump_name]
- if pump._is_isolated == True: # Sina added this
+ if pump._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': pump_name,
'node1': pump.start_node_name,
'node2': pump.end_node_name,
@@ -450,7 +450,7 @@ def _write_pumps(self, f, wn):
from_si(self.flow_units, pump.power, HydParam.Power)
)
else:
- raise RuntimeError('Only head or power info is supported of pumps.')
+ raise RuntimeError('Only head or power info is supported for pumps.') # noqa: EM101, TRY003
tmp_entry = _PUMP_ENTRY
if pump.speed_timeseries.base_value != 1:
E['speed_keyword'] = 'SPEED'
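
TRY003 and EM101 can be satisfied outright by moving the literal message into a dedicated exception type; a hypothetical sketch of the raise site above:

    class UnsupportedPumpInfoError(RuntimeError):
        # The message lives with the type, not the raise site.
        def __init__(self):
            super().__init__('Only head or power info is supported for pumps.')

    raise UnsupportedPumpInfoError
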
@@ -480,9 +480,9 @@ def _write_valves(self, f, wn):
# lnames.sort()
for valve_name in lnames:
valve = wn.links[valve_name]
- if valve._is_isolated == True: # Sina added this
+ if valve._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- E = {
+ E = { # noqa: N806
'name': valve_name,
'node1': valve.start_node_name,
'node2': valve.end_node_name,
@@ -490,7 +490,7 @@ def _write_valves(self, f, wn):
self.flow_units, valve.diameter, HydParam.PipeDiameter
),
'vtype': valve.valve_type,
- 'set': valve._initial_setting,
+ 'set': valve._initial_setting, # noqa: SLF001
'mloss': valve.minor_loss,
'com': ';',
}
@@ -499,17 +499,17 @@ def _write_valves(self, f, wn):
if valve_type in ['PRV', 'PSV', 'PBV']:
valve_set = from_si(
self.flow_units,
- valve._initial_setting,
+ valve._initial_setting, # noqa: SLF001
HydParam.Pressure,
)
elif valve_type == 'FCV':
valve_set = from_si(
self.flow_units,
- valve._initial_setting,
+ valve._initial_setting, # noqa: SLF001
HydParam.Flow,
)
elif valve_type == 'TCV':
- valve_set = valve._initial_setting
+ valve_set = valve._initial_setting # noqa: SLF001
elif valve_type == 'GPV':
valve_set = valve.headloss_curve_name
formatter = _GPV_ENTRY
@@ -527,12 +527,12 @@ def _write_emitters(self, f, wn):
for junction_name in njunctions:
junction = wn.nodes[junction_name]
# Sina added this
- if junction._is_isolated == True:
+ if junction._is_isolated == True: # noqa: SLF001, E712
continue
- if junction._emitter_coefficient:
+ if junction._emitter_coefficient: # noqa: SLF001
val = from_si(
self.flow_units,
- junction._emitter_coefficient,
+ junction._emitter_coefficient, # noqa: SLF001
HydParam.Flow,
)
f.write(entry.format(junction_name, str(val)).encode('ascii'))
@@ -544,7 +544,7 @@ def _write_status(self, f, wn):
f.write('[STATUS]\n'.encode('ascii'))
f.write('{:10s} {:10s}\n'.format(';ID', 'Setting').encode('ascii'))
for link_name, link in wn.links():
- if link._is_isolated == True: # Sina added this
+ if link._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if isinstance(link, Pipe):
continue
@@ -587,21 +587,21 @@ def _write_status(self, f, wn):
# setting).encode('ascii'))
# f.write('\n'.encode('ascii'))
- def _write_controls(self, f, wn):
+ def _write_controls(self, f, wn): # noqa: C901
def get_setting(control_action, control_name):
- value = control_action._value
- attribute = control_action._attribute.lower()
+ value = control_action._value # noqa: SLF001
+ attribute = control_action._attribute.lower() # noqa: SLF001
if attribute == 'status':
setting = LinkStatus(value).name
elif attribute == 'base_speed':
setting = str(value)
elif attribute == 'setting' and isinstance(
- control_action._target_obj,
+ control_action._target_obj, # noqa: SLF001
Valve,
):
- valve = control_action._target_obj
+ valve = control_action._target_obj # noqa: SLF001
valve_type = valve.valve_type
- if valve_type == 'PRV' or valve_type == 'PSV' or valve_type == 'PBV':
+ if valve_type == 'PRV' or valve_type == 'PSV' or valve_type == 'PBV': # noqa: PLR1714
setting = str(from_si(self.flow_units, value, HydParam.Pressure))
elif valve_type == 'FCV':
setting = str(from_si(self.flow_units, value, HydParam.Flow))
@@ -616,7 +616,7 @@ def get_setting(control_action, control_name):
else:
setting = None
logger.warning(
- 'Could not write control ' + str(control_name) + ' - skipping'
+ 'Could not write control ' + str(control_name) + ' - skipping' # noqa: G003
)
return setting
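
The PLR1714 suppression above covers a chain of `==` tests against one variable; a membership test has the same truth table and reads in one glance:

    valve_type = 'PSV'  # sample value
    if valve_type in ('PRV', 'PSV', 'PBV'):
        print('pressure-controlled valve')
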
@@ -624,102 +624,102 @@ def get_setting(control_action, control_name):
f.write('[CONTROLS]\n'.encode('ascii'))
# Time controls and conditional controls only
for text, all_control in wn.controls():
- control_action = all_control._then_actions[0]
+ control_action = all_control._then_actions[0] # noqa: SLF001
# Sina added this
- if control_action._target_obj._is_isolated == True:
+ if control_action._target_obj._is_isolated == True: # noqa: SLF001, E712
continue
if all_control.epanet_control_type is not _ControlType.rule:
if (
- len(all_control._then_actions) != 1
- or len(all_control._else_actions) != 0
+ len(all_control._then_actions) != 1 # noqa: SLF001
+ or len(all_control._else_actions) != 0 # noqa: SLF001
):
- logger.error('Too many actions on CONTROL "%s"' % text)
- raise RuntimeError('Too many actions on CONTROL "%s"' % text)
+ logger.error('Too many actions on CONTROL "%s"' % text) # noqa: G002, UP031
+ raise RuntimeError('Too many actions on CONTROL "%s"' % text) # noqa: UP031
if not isinstance(control_action.target()[0], Link):
continue
if isinstance(
- all_control._condition,
+ all_control._condition, # noqa: SLF001
(SimTimeCondition, TimeOfDayCondition),
):
entry = '{ltype} {link} {setting} AT {compare} {time:g}\n'
vals = {
- 'ltype': control_action._target_obj.link_type,
- 'link': control_action._target_obj.name,
+ 'ltype': control_action._target_obj.link_type, # noqa: SLF001
+ 'link': control_action._target_obj.name, # noqa: SLF001
'setting': get_setting(control_action, text),
'compare': 'TIME',
- 'time': all_control._condition._threshold / 3600.0,
+ 'time': all_control._condition._threshold / 3600.0, # noqa: SLF001
}
if vals['setting'] is None:
continue
- if isinstance(all_control._condition, TimeOfDayCondition):
+ if isinstance(all_control._condition, TimeOfDayCondition): # noqa: SLF001
vals['compare'] = 'CLOCKTIME'
f.write(entry.format(**vals).encode('ascii'))
elif (
- all_control._condition._source_obj._is_isolated == True
+ all_control._condition._source_obj._is_isolated == True # noqa: SLF001, E712
): # Sina added this
continue
- elif isinstance(all_control._condition, (ValueCondition)):
+ elif isinstance(all_control._condition, (ValueCondition)): # noqa: SLF001
entry = '{ltype} {link} {setting} IF {ntype} {node} {compare} {thresh}\n'
vals = {
- 'ltype': control_action._target_obj.link_type,
- 'link': control_action._target_obj.name,
+ 'ltype': control_action._target_obj.link_type, # noqa: SLF001
+ 'link': control_action._target_obj.name, # noqa: SLF001
'setting': get_setting(control_action, text),
- 'ntype': all_control._condition._source_obj.node_type,
- 'node': all_control._condition._source_obj.name,
+ 'ntype': all_control._condition._source_obj.node_type, # noqa: SLF001
+ 'node': all_control._condition._source_obj.name, # noqa: SLF001
'compare': 'above',
'thresh': 0.0,
}
if vals['setting'] is None:
continue
- if all_control._condition._relation in [
+ if all_control._condition._relation in [ # noqa: SLF001
np.less,
np.less_equal,
Comparison.le,
Comparison.lt,
]:
vals['compare'] = 'below'
- threshold = all_control._condition._threshold
- if isinstance(all_control._condition._source_obj, Tank):
+ threshold = all_control._condition._threshold # noqa: SLF001
+ if isinstance(all_control._condition._source_obj, Tank): # noqa: SLF001
vals['thresh'] = from_si(
self.flow_units, threshold, HydParam.HydraulicHead
)
- elif isinstance(all_control._condition._source_obj, Junction):
+ elif isinstance(all_control._condition._source_obj, Junction): # noqa: SLF001
vals['thresh'] = from_si(
self.flow_units, threshold, HydParam.Pressure
)
else:
- raise RuntimeError(
- 'Unknown control for EPANET INP files: %s'
+ raise RuntimeError( # noqa: TRY004
+ 'Unknown control for EPANET INP files: %s' # noqa: UP031
% type(all_control)
)
f.write(entry.format(**vals).encode('ascii'))
elif not isinstance(all_control, Control):
raise RuntimeError(
- 'Unknown control for EPANET INP files: %s'
+ 'Unknown control for EPANET INP files: %s' # noqa: UP031
% type(all_control)
)
f.write('\n'.encode('ascii'))
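
For reference, the two templates used in this method render control lines like the following (all values made up):

    entry_time = '{ltype} {link} {setting} AT {compare} {time:g}\n'
    print(entry_time.format(ltype='Pump', link='P1', setting='OPEN',
                            compare='CLOCKTIME', time=6.5))
    # Pump P1 OPEN AT CLOCKTIME 6.5
    entry_cond = '{ltype} {link} {setting} IF {ntype} {node} {compare} {thresh}\n'
    print(entry_cond.format(ltype='Pipe', link='L7', setting='CLOSED',
                            ntype='Tank', node='T2', compare='below', thresh=2.5))
    # Pipe L7 CLOSED IF Tank T2 below 2.5
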
def _write_rules(self, f, wn):
f.write('[RULES]\n'.encode('ascii'))
- for text, all_control in wn.controls():
+ for text, all_control in wn.controls(): # noqa: B007
entry = '{}\n'
if all_control.epanet_control_type == _ControlType.rule:
# Sina added this begin
try:
- if all_control._then_actions[0]._target_obj._is_isolated == True:
+ if all_control._then_actions[0]._target_obj._is_isolated == True: # noqa: SLF001, E712
continue
- except:
+ except: # noqa: S110, E722
pass
try:
- if all_control.condition._source_obj._is_isolated == True:
+ if all_control.condition._source_obj._is_isolated == True: # noqa: SLF001, E712
continue
- except:
+ except: # noqa: S110, E722
pass
# Sina added this end
- rule = _EpanetRule('blah', self.flow_units, self.mass_units)
+ rule = _EpanetRule('blah', self.flow_units, self.mass_units) # noqa: F821
rule.from_if_then_else(all_control)
f.write(entry.format(str(rule)).encode('ascii'))
f.write('\n'.encode('ascii'))
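
The S110/E722 pairs above guard attribute lookups on controls that may lack then-actions or a source object. Naming the expected failures would keep the guard from also swallowing interrupts and genuine bugs; a standalone sketch:

    all_controls = []  # stand-in for the (text, control) pairs from wn.controls()
    for ctrl in all_controls:
        try:
            skip = bool(ctrl._then_actions[0]._target_obj._is_isolated)
        except (AttributeError, IndexError):
            skip = False  # only the expected shapes, nothing else
        if skip:
            continue
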
@@ -733,12 +733,12 @@ def _write_demands(self, f, wn):
# nodes.sort()
for node in nodes:
# Sina added this
- if wn.get_node(node)._is_isolated == True:
+ if wn.get_node(node)._is_isolated == True: # noqa: SLF001, E712
continue
demands = wn.get_node(node).demand_timeseries_list
# leak =
if len(demands) > 1:
- for ct, demand in enumerate(demands):
+ for ct, demand in enumerate(demands): # noqa: B007
cat = str(demand.category)
# if cat == 'EN2 base':
# cat = ''
@@ -746,7 +746,7 @@ def _write_demands(self, f, wn):
cat = ''
else:
cat = ' ;' + demand.category
- E = {
+ E = { # noqa: N806
'node': node,
'base': from_si(
self.flow_units, demand.base_value, HydParam.Demand
@@ -768,12 +768,12 @@ def _write_demands(self, f, wn):
def _write_quality(self, f, wn):
f.write('[QUALITY]\n'.encode('ascii'))
entry = '{:10s} {:10s}\n'
- label = '{:10s} {:10s}\n'
+ label = '{:10s} {:10s}\n' # noqa: F841
nnodes = list(wn.nodes.keys())
# nnodes.sort()
for node_name in nnodes:
node = wn.nodes[node_name]
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if node.initial_quality:
if wn.options.quality.mode == 'CHEMICAL':
@@ -800,7 +800,7 @@ def _write_reactions(self, f, wn):
entry_int = ' {:s} {:s} {:d}\n'
entry_float = ' {:s} {:s} {:<10.4f}\n'
for tank_name, tank in wn.nodes(Tank):
- if tank._is_isolated == True: # Sina added this
+ if tank._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if tank.bulk_rxn_coeff is not None:
f.write(
@@ -817,7 +817,7 @@ def _write_reactions(self, f, wn):
).encode('ascii')
)
for pipe_name, pipe in wn.links(Pipe):
- if pipe._is_isolated == True: # Sina added this
+ if pipe._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if pipe.bulk_rxn_coeff is not None:
f.write(
@@ -909,11 +909,11 @@ def _write_sources(self, f, wn):
entry = '{:10s} {:10s} {:10s} {:10s}\n'
label = '{:10s} {:10s} {:10s} {:10s}\n'
f.write(label.format(';Node', 'Type', 'Quality', 'Pattern').encode('ascii'))
- nsources = list(wn._sources.keys())
+ nsources = list(wn._sources.keys()) # noqa: SLF001
# nsources.sort()
for source_name in nsources:
- source = wn._sources[source_name]
- if source._is_isolated == True: # Sina added this
+ source = wn._sources[source_name] # noqa: SLF001
+ if source._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if source.source_type.upper() == 'MASS':
@@ -931,7 +931,7 @@ def _write_sources(self, f, wn):
self.mass_units,
)
- E = {
+ E = { # noqa: N806
'node': source.node_name,
'type': source.source_type,
'quality': str(strength),
@@ -957,12 +957,12 @@ def _write_mixing(self, f, wn):
# lnames.sort()
for tank_name in lnames:
tank = wn.nodes[tank_name]
- if tank._is_isolated == True: # Sina added this
+ if tank._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- if tank._mix_model is not None:
- if tank._mix_model in [MixType.Mixed, MixType.Mix1, 0]:
+ if tank._mix_model is not None: # noqa: SLF001
+ if tank._mix_model in [MixType.Mixed, MixType.Mix1, 0]: # noqa: SLF001
f.write(f' {tank_name:19s} MIXED\n'.encode('ascii'))
- elif tank._mix_model in [
+ elif tank._mix_model in [ # noqa: SLF001
MixType.TwoComp,
MixType.Mix2,
'2comp',
@@ -970,22 +970,22 @@ def _write_mixing(self, f, wn):
1,
]:
f.write(
- f' {tank_name:19s} 2COMP {tank._mix_frac}\n'.encode('ascii')
+ f' {tank_name:19s} 2COMP {tank._mix_frac}\n'.encode('ascii') # noqa: SLF001
)
- elif tank._mix_model in [MixType.FIFO, 2]:
+ elif tank._mix_model in [MixType.FIFO, 2]: # noqa: SLF001
f.write(f' {tank_name:19s} FIFO\n'.encode('ascii'))
- elif tank._mix_model in [MixType.LIFO, 3]:
+ elif tank._mix_model in [MixType.LIFO, 3]: # noqa: SLF001
f.write(f' {tank_name:19s} LIFO\n'.encode('ascii'))
- elif isinstance(tank._mix_model, str) and tank._mix_frac is not None:
+ elif isinstance(tank._mix_model, str) and tank._mix_frac is not None: # noqa: SLF001
f.write(
- f' {tank_name:19s} {tank._mix_model} {tank._mix_frac}\n'.encode(
+ f' {tank_name:19s} {tank._mix_model} {tank._mix_frac}\n'.encode( # noqa: SLF001
'ascii'
)
)
- elif isinstance(tank._mix_model, str):
- f.write(f' {tank_name:19s} {tank._mix_model}\n'.encode('ascii'))
+ elif isinstance(tank._mix_model, str): # noqa: SLF001
+ f.write(f' {tank_name:19s} {tank._mix_model}\n'.encode('ascii')) # noqa: SLF001
else:
- logger.warning('Unknown mixing model: %s', tank._mix_model)
+ logger.warning('Unknown mixing model: %s', tank._mix_model) # noqa: SLF001
f.write('\n'.encode('ascii'))
# Options and Reporting
@@ -1188,7 +1188,7 @@ def _write_times(self, f, wn):
day = int(hrs / 24)
hrs -= day * 24
- if hrs < 12:
+ if hrs < 12: # noqa: PLR2004
time_format = ' AM'
else:
hrs -= 12
@@ -1201,7 +1201,7 @@ def _write_times(self, f, wn):
hrs, mm, sec = _sec_to_string(wn.options.time.rule_timestep)
- # TODO: RULE TIMESTEP is not written?!
+ # TODO: RULE TIMESTEP is not written?! # noqa: TD002
# f.write(time_entry.format('RULE TIMESTEP', hrs, mm, int(sec)).encode('ascii'))
f.write(
entry.format('STATISTIC', wn.options.results.statistic).encode('ascii')
@@ -1214,7 +1214,7 @@ def _write_coordinates(self, f, wn):
label = '{:10s} {:10s} {:10s}\n'
f.write(label.format(';Node', 'X-Coord', 'Y-Coord').encode('ascii'))
for name, node in wn.nodes():
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
val = node.coordinates
f.write(entry.format(name, val[0], val[1]).encode('ascii'))
@@ -1229,9 +1229,9 @@ def _write_vertices(self, f, wn):
# lnames.sort()
for pipe_name in lnames:
pipe = wn.links[pipe_name]
- if pipe._is_isolated == True: # Sina added this
+ if pipe._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
- for vert in pipe._vertices:
+ for vert in pipe._vertices: # noqa: SLF001
f.write(entry.format(pipe_name, vert[0], vert[1]).encode('ascii'))
f.write('\n'.encode('ascii'))
@@ -1244,7 +1244,7 @@ def _write_tags(self, f, wn):
# nnodes.sort()
for node_name in nnodes:
node = wn.nodes[node_name]
- if node._is_isolated == True: # Sina added this
+ if node._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if node.tag:
f.write(entry.format('NODE', node_name, node.tag).encode('ascii'))
@@ -1252,7 +1252,7 @@ def _write_tags(self, f, wn):
nlinks.sort()
for link_name in nlinks:
link = wn.links[link_name]
- if link._is_isolated == True: # Sina added this
+ if link._is_isolated == True: # Sina added this # noqa: SLF001, E712
continue
if link.tag:
f.write(entry.format('LINK', link_name, link.tag).encode('ascii'))
@@ -1295,15 +1295,15 @@ class BinFile(wntrfr.epanet.io.BinFile):
def __init__(
self,
- result_types=None,
- network=False,
- energy=False,
- statistics=False,
- convert_status=True,
+ result_types=None, # noqa: ARG002
+ network=False, # noqa: FBT002, ARG002
+ energy=False, # noqa: FBT002, ARG002
+ statistics=False, # noqa: FBT002, ARG002
+ convert_status=True, # noqa: FBT002, ARG002
):
super().__init__()
- def read(self, filename, custom_handlers=False, start_time=None):
+ def read(self, filename, custom_handlers=False, start_time=None): # noqa: FBT002, C901, PLR0915
"""Read a binary file and create a results object.
Parameters
@@ -1332,13 +1332,13 @@ def read(self, filename, custom_handlers=False, start_time=None):
"""
self.results = wntrfr.sim.SimulationResults()
logger.debug(start_time)
- if start_time == None:
+ if start_time == None: # noqa: E711
start_time = 0
logger.debug('Read binary EPANET data from %s', filename)
dt_str = f'|S{self.idlen}'
- with open(filename, 'rb') as fin:
+ with open(filename, 'rb') as fin: # noqa: PTH123
ftype = self.ftype
- idlen = self.idlen
+ idlen = self.idlen # noqa: F841
logger.debug('... read prolog information ...')
prolog = np.fromfile(fin, dtype=np.int32, count=15)
magic1 = prolog[0]
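
On the E711 suppression above: `None` is a singleton, so identity is the idiomatic test:

    start_time = None
    if start_time is None:  # rather than `start_time == None`
        start_time = 0
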
@@ -1418,18 +1418,18 @@ def read(self, filename, custom_handlers=False, start_time=None):
).tolist()
self.node_names = nodenames
self.link_names = linknames
- linkstart = np.array(
+ linkstart = np.array( # noqa: F841
np.fromfile(fin, dtype=np.int32, count=nlinks), dtype=int
)
- linkend = np.array(
+ linkend = np.array( # noqa: F841
np.fromfile(fin, dtype=np.int32, count=nlinks), dtype=int
)
linktype = np.fromfile(fin, dtype=np.int32, count=nlinks)
- tankidxs = np.fromfile(fin, dtype=np.int32, count=ntanks)
- tankarea = np.fromfile(fin, dtype=np.dtype(ftype), count=ntanks)
- elevation = np.fromfile(fin, dtype=np.dtype(ftype), count=nnodes)
- linklen = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks)
- diameter = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks)
+ tankidxs = np.fromfile(fin, dtype=np.int32, count=ntanks) # noqa: F841
+ tankarea = np.fromfile(fin, dtype=np.dtype(ftype), count=ntanks) # noqa: F841
+ elevation = np.fromfile(fin, dtype=np.dtype(ftype), count=nnodes) # noqa: F841
+ linklen = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks) # noqa: F841
+ diameter = np.fromfile(fin, dtype=np.dtype(ftype), count=nlinks) # noqa: F841
"""
self.save_network_desc_line('link_start', linkstart)
self.save_network_desc_line('link_end', linkend)
@@ -1441,7 +1441,7 @@ def read(self, filename, custom_handlers=False, start_time=None):
self.save_network_desc_line('link_diameter', diameter)
"""
logger.debug('... read energy data ...')
- for i in range(npumps):
+ for i in range(npumps): # noqa: B007
pidx = int(np.fromfile(fin, dtype=np.int32, count=1))
energy = np.fromfile(fin, dtype=np.dtype(ftype), count=6)
self.save_energy_line(pidx, linknames[pidx - 1], energy)
@@ -1454,7 +1454,7 @@ def read(self, filename, custom_handlers=False, start_time=None):
+ start_time
)
nrptsteps = len(reporttimes)
- statsN = nrptsteps
+ statsN = nrptsteps # noqa: N806, F841
if statsflag in [
StatisticsType.Maximum,
StatisticsType.Minimum,
@@ -1560,8 +1560,8 @@ def read(self, filename, custom_handlers=False, start_time=None):
self.save_ep_line(
ts, ResultType.frictionfact, frictionfactor
)
- except Exception as e:
- logger.exception('Error reading or writing EP line: %s', e)
+ except Exception as e: # noqa: PERF203
+ logger.exception('Error reading or writing EP line: %s', e) # noqa: TRY401
logger.warning('Missing results from report period %d', ts)
else:
# type_list = 4*nnodes*['node'] + 8*nlinks*['link']
@@ -1593,43 +1593,43 @@ def read(self, filename, custom_handlers=False, start_time=None):
)
data = np.reshape(data, (nrptsteps, (4 * nnodes + 8 * nlinks)))
except Exception as e:
- logger.exception('Failed to process file: %s', e)
+ logger.exception('Failed to process file: %s', e) # noqa: TRY401
- df = pd.DataFrame(data.transpose(), index=index, columns=reporttimes)
- df = df.transpose()
+ df = pd.DataFrame(data.transpose(), index=index, columns=reporttimes) # noqa: PD901
+ df = df.transpose() # noqa: PD901
self.results.node = {}
self.results.link = {}
self.results.network_name = self.inp_file
# Node Results
- self.results.node['demand'] = HydParam.Demand._to_si(
+ self.results.node['demand'] = HydParam.Demand._to_si( # noqa: SLF001
self.flow_units, df['demand']
)
- self.results.node['head'] = HydParam.HydraulicHead._to_si(
+ self.results.node['head'] = HydParam.HydraulicHead._to_si( # noqa: SLF001
self.flow_units, df['head']
)
- self.results.node['pressure'] = HydParam.Pressure._to_si(
+ self.results.node['pressure'] = HydParam.Pressure._to_si( # noqa: SLF001
self.flow_units, df['pressure']
)
# Water Quality Results (node and link)
if self.quality_type is QualType.Chem:
- self.results.node['quality'] = QualParam.Concentration._to_si(
+ self.results.node['quality'] = QualParam.Concentration._to_si( # noqa: SLF001
self.flow_units, df['quality'], mass_units=self.mass_units
)
self.results.link['linkquality'] = (
- QualParam.Concentration._to_si(
+ QualParam.Concentration._to_si( # noqa: SLF001
self.flow_units,
df['linkquality'],
mass_units=self.mass_units,
)
)
elif self.quality_type is QualType.Age:
- self.results.node['quality'] = QualParam.WaterAge._to_si(
+ self.results.node['quality'] = QualParam.WaterAge._to_si( # noqa: SLF001
self.flow_units, df['quality'], mass_units=self.mass_units
)
- self.results.link['linkquality'] = QualParam.WaterAge._to_si(
+ self.results.link['linkquality'] = QualParam.WaterAge._to_si( # noqa: SLF001
self.flow_units,
df['linkquality'],
mass_units=self.mass_units,
@@ -1639,21 +1639,21 @@ def read(self, filename, custom_handlers=False, start_time=None):
self.results.link['linkquality'] = df['linkquality']
# Link Results
- self.results.link['flowrate'] = HydParam.Flow._to_si(
+ self.results.link['flowrate'] = HydParam.Flow._to_si( # noqa: SLF001
self.flow_units, df['flow']
)
self.results.link['headloss'] = df['headloss'] # Unit is per 1000
- self.results.link['velocity'] = HydParam.Velocity._to_si(
+ self.results.link['velocity'] = HydParam.Velocity._to_si( # noqa: SLF001
self.flow_units, df['velocity']
)
# self.results.link['status'] = df['linkstatus']
status = np.array(df['linkstatus'])
if self.convert_status:
- status[status <= 2] = 0
- status[status == 3] = 1
- status[status >= 5] = 1
- status[status == 4] = 2
+ status[status <= 2] = 0 # noqa: PLR2004
+ status[status == 3] = 1 # noqa: PLR2004
+ status[status >= 5] = 1 # noqa: PLR2004
+ status[status == 4] = 2 # noqa: PLR2004
self.results.link['status'] = pd.DataFrame(
data=status, columns=linknames, index=reporttimes
)
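
The PLR2004 suppressions mark bare status numerals; hypothetical named constants would make the order-sensitive remapping easier to audit:

    import numpy as np

    CLOSED, OPEN, ACTIVE = 0, 1, 2       # assumed names for the result codes
    status = np.array([0, 3, 4, 5, 2])   # sample raw EPANET status values
    status[status <= 2] = CLOSED         # same order-sensitive steps as above
    status[status == 3] = OPEN
    status[status >= 5] = OPEN
    status[status == 4] = ACTIVE
    print(status)                        # [0 1 2 1 0]
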
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/results.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/results.py
index 6054c3e38..9162dcefa 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/results.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/results.py
@@ -1,4 +1,4 @@
-from wntrfr.sim.results import SimulationResults
+from wntrfr.sim.results import SimulationResults # noqa: D100
class SimulationResults(SimulationResults):
diff --git a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/toolkit.py b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/toolkit.py
index 4b9d42b46..04da60b09 100644
--- a/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/toolkit.py
+++ b/modules/systemPerformance/REWET/REWET/EnhancedWNTR/sim/toolkit.py
@@ -1,7 +1,7 @@
"""Created on Wed May 26 16:11:36 2021
@author: snaeimi
-"""
+""" # noqa: D400
import ctypes
import logging
@@ -16,37 +16,37 @@
logger = logging.getLogger(__name__)
-class EpanetException(Exception):
+class EpanetException(Exception): # noqa: N818, D101
pass
-class ENepanet(wntrfr.epanet.toolkit.ENepanet):
- def __init__(
+class ENepanet(wntrfr.epanet.toolkit.ENepanet): # noqa: D101
+ def __init__( # noqa: C901
self,
inpfile='',
rptfile='',
binfile='',
- changed_epanet=False,
+ changed_epanet=False, # noqa: FBT002
version=2.2,
):
- if changed_epanet == False or changed_epanet == True:
+ if changed_epanet == False or changed_epanet == True: # noqa: E712, PLR1714
self.changed_epanet = changed_epanet
else:
- raise ValueError('changed_epanet must be a boolean value')
+ raise ValueError('changed_epanet must be a boolean value') # noqa: EM101, TRY003
- if changed_epanet == False:
- try:
+ if changed_epanet == False: # noqa: E712
+ try: # noqa: SIM105
super().__init__(inpfile, rptfile, binfile, version=version)
- except:
+ except: # noqa: S110, E722
pass # to add robustness for when WNTR
# cannot load the unmodified DLLs for any reason
else:
- if float(version) != 2.2:
- raise ValueError(
- 'EPANET version must be 2.2 when using tegh changed version'
+ if float(version) != 2.2: # noqa: PLR2004
+ raise ValueError( # noqa: TRY003
+ 'EPANET version must be 2.2 when using the changed version' # noqa: EM101
)
- elif float(version) == 2.2:
+ elif float(version) == 2.2: # noqa: RET506, PLR2004
libnames = ['epanet22_mod', 'epanet22_win32_mod']
if '64' in platform.machine():
libnames.insert(0, 'epanet22_amd64_mod')
@@ -55,34 +55,34 @@ def __init__(
if os.name in ['nt', 'dos']:
libepanet = resource_filename(
__name__,
- 'Windows/%s.dll' % lib,
+ 'Windows/%s.dll' % lib, # noqa: UP031
)
self.ENlib = ctypes.windll.LoadLibrary(libepanet)
elif sys.platform == 'darwin':
libepanet = resource_filename(
__name__,
- 'Darwin/lib%s.dylib' % lib,
+ 'Darwin/lib%s.dylib' % lib, # noqa: UP031
)
self.ENlib = ctypes.cdll.LoadLibrary(libepanet)
else:
libepanet = resource_filename(
__name__,
- 'Linux/lib%s.so' % lib,
+ 'Linux/lib%s.so' % lib, # noqa: UP031
)
self.ENlib = ctypes.cdll.LoadLibrary(libepanet)
- return
- except Exception as E1:
+ return # noqa: TRY300
+ except Exception as E1: # noqa: PERF203
if lib == libnames[-1]:
- raise E1
+ raise E1 # noqa: TRY201
finally:
- if version >= 2.2 and '32' not in lib:
+ if version >= 2.2 and '32' not in lib: # noqa: PLR2004
self._project = ctypes.c_uint64()
- elif version >= 2.2:
+ elif version >= 2.2: # noqa: PLR2004
self._project = ctypes.c_uint32()
else:
self._project = None
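
The UP031 suppressions mark old-style `%` interpolation; f-strings are the drop-in modern form (library name borrowed from the `libnames` list above):

    lib = 'epanet22_amd64_mod'
    for tmpl in (f'Windows/{lib}.dll', f'Darwin/lib{lib}.dylib', f'Linux/lib{lib}.so'):
        print(tmpl)
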
- def ENn(self, inpfile=None, rptfile=None, binfile=None):
+ def ENn(self, inpfile=None, rptfile=None, binfile=None): # noqa: N802
"""Opens an EPANET input file and reads in network data
Parameters
@@ -94,22 +94,22 @@ def ENn(self, inpfile=None, rptfile=None, binfile=None):
binfile : str
Binary output file to create (default to constructor value)
- """
+ """ # noqa: D400, D401
inpfile = inpfile.encode('ascii')
rptfile = rptfile.encode('ascii') # ''.encode('ascii')
binfile = binfile.encode('ascii')
s = 's'
self.errcode = self.ENlib.EN_runproject(inpfile, rptfile, binfile, s)
self._error()
- if self.errcode < 100:
+ if self.errcode < 100: # noqa: PLR2004
self.fileLoaded = True
- def ENSetIgnoreFlag(self, ignore_flag=0):
- if abs(ignore_flag - np.round(ignore_flag)) > 0.00001 or ignore_flag < 0:
+ def ENSetIgnoreFlag(self, ignore_flag=0): # noqa: N802, D102
+ if abs(ignore_flag - np.round(ignore_flag)) > 0.00001 or ignore_flag < 0: # noqa: PLR2004
logger.error(
- 'ignore_flag must be int value and bigger than zero'
+ 'ignore_flag must be int value and bigger than zero' # noqa: G003
+ str(ignore_flag)
)
- flag = ctypes.c_int(int(ignore_flag))
+ flag = ctypes.c_int(int(ignore_flag)) # noqa: F841
# print('++++++++++++++++++++++')
# self.ENlib.ENEXTENDEDsetignoreflag(flag)
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Designer.py
index eda130998..cb3e5665f 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Designer.py
@@ -1,7 +1,7 @@
"""Created on Tue Nov 1 23:25:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
@@ -9,7 +9,7 @@
from .Damage_Discovery_Window import Ui_damage_discovery
-class Damage_Discovery_Designer(Ui_damage_discovery):
+class Damage_Discovery_Designer(Ui_damage_discovery): # noqa: D101
def __init__(self, damage_discovery_model):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
@@ -57,13 +57,13 @@ def __init__(self, damage_discovery_model):
self.remove_button.clicked.connect(self.removeTimeDiscoveryRatioByButton)
self.buttonBox.accepted.connect(self.okButtonPressed)
- def discoveryRatioValidatorHelper(self, x):
+ def discoveryRatioValidatorHelper(self, x): # noqa: ARG002, N802, D102
discovery_ratio = float(self.discovery_ratio_line.text())
if discovery_ratio > 1:
self.discovery_ratio_line.setText(str(1.0))
- def enableLeakBased(self):
+ def enableLeakBased(self): # noqa: N802, D102
self.leak_based_radio.setChecked(True)
self.leak_anount_label.setEnabled(True)
@@ -77,7 +77,7 @@ def enableLeakBased(self):
self.add_button.setEnabled(False)
self.remove_button.setEnabled(False)
- def enableTimeBased(self):
+ def enableTimeBased(self): # noqa: N802, D102
self.time_based_radio.setChecked(True)
self.leak_anount_label.setEnabled(False)
@@ -91,11 +91,11 @@ def enableTimeBased(self):
self.add_button.setEnabled(True)
self.remove_button.setEnabled(True)
- def clearTimeDiscoveryRatioTable(self):
- for i in range(self.time_discovery_ratio_table.rowCount()):
+ def clearTimeDiscoveryRatioTable(self): # noqa: N802, D102
+ for i in range(self.time_discovery_ratio_table.rowCount()): # noqa: B007
self.time_discovery_ratio_table.removeRow(0)
- def okButtonPressed(self):
+ def okButtonPressed(self): # noqa: C901, N802, D102
if self.leak_based_radio.isChecked():
leak_amount = self.leak_amount_line.text()
leak_time = self.leak_time_line.text()
@@ -103,7 +103,7 @@ def okButtonPressed(self):
if leak_amount == '':
self.errorMSG('Empty Value', "Please fill the 'Leak Amount' field.")
return
- elif leak_time == '':
+ elif leak_time == '': # noqa: RET505
self.errorMSG('Empty Value', "Please fill the 'Leak Time' field.")
return
@@ -127,7 +127,7 @@ def okButtonPressed(self):
return
if (
- self.damage_discovery_model[
+ self.damage_discovery_model[ # noqa: E712
'time_discovery_ratio'
].is_monotonic_increasing
== False
@@ -153,7 +153,7 @@ def okButtonPressed(self):
self._window.accept()
- def populateTimeDiscoveryRatioTable(self, time_discovery_ratio):
+ def populateTimeDiscoveryRatioTable(self, time_discovery_ratio): # noqa: N802, D102
for time, discovery_ratio in time_discovery_ratio.iteritems():
number_of_rows = self.time_discovery_ratio_table.rowCount()
self.time_discovery_ratio_table.insertRow(number_of_rows)
@@ -171,7 +171,7 @@ def populateTimeDiscoveryRatioTable(self, time_discovery_ratio):
number_of_rows, 1, discovery_ratio_item
)
- def addTimeDiscoveryRatioByButton(self):
+ def addTimeDiscoveryRatioByButton(self): # noqa: N802, D102
time = self.time_line.text()
discovery_ratio = self.discovery_ratio_line.text()
@@ -201,7 +201,7 @@ def addTimeDiscoveryRatioByButton(self):
self.damage_discovery_model['time_discovery_ratio']
)
- def removeTimeDiscoveryRatioByButton(self):
+ def removeTimeDiscoveryRatioByButton(self): # noqa: N802, D102
items = self.time_discovery_ratio_table.selectedItems()
if len(items) < 1:
return
@@ -220,20 +220,20 @@ def removeTimeDiscoveryRatioByButton(self):
time_discovery_ratio = time_discovery_ratio.drop(time)
self.damage_discovery_model['time_discovery_ratio'] = time_discovery_ratio
self.clearTimeDiscoveryRatioTable()
- self.populateTimeDiscoveryRatioTable
+ self.populateTimeDiscoveryRatioTable # noqa: B018
- def methodRadioButtonToggled(self):
+ def methodRadioButtonToggled(self): # noqa: N802, D102
if self.leak_based_radio.isChecked():
self.enableLeakBased()
elif self.time_based_radio.isChecked():
self.enableTimeBased()
- def errorMSG(self, error_title, error_msg, error_more_msg=None):
+ def errorMSG(self, error_title, error_msg, error_more_msg=None): # noqa: N802, D102
error_widget = QtWidgets.QMessageBox()
error_widget.setIcon(QtWidgets.QMessageBox.Critical)
error_widget.setText(error_msg)
error_widget.setWindowTitle(error_title)
error_widget.setStandardButtons(QtWidgets.QMessageBox.Ok)
- if error_more_msg != None:
+ if error_more_msg != None: # noqa: E711
error_widget.setInformativeText(error_more_msg)
error_widget.exec_()
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Window.py
index 77c4b7fb4..c43c038f9 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Damage_Discovery_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Damage_Discovery_Window.ui'
+# Form implementation generated from reading ui file 'Damage_Discovery_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtWidgets
-class Ui_damage_discovery:
- def setupUi(self, damage_discovery):
+class Ui_damage_discovery: # noqa: D101
+ def setupUi(self, damage_discovery): # noqa: N802, D102
damage_discovery.setObjectName('damage_discovery')
damage_discovery.resize(450, 400)
damage_discovery.setMinimumSize(QtCore.QSize(450, 400))
@@ -79,7 +79,7 @@ def setupUi(self, damage_discovery):
self.buttonBox.rejected.connect(damage_discovery.reject)
QtCore.QMetaObject.connectSlotsByName(damage_discovery)
- def retranslateUi(self, damage_discovery):
+ def retranslateUi(self, damage_discovery): # noqa: N802, D102
_translate = QtCore.QCoreApplication.translate
damage_discovery.setWindowTitle(
_translate('damage_discovery', 'Damage Discovery')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Damage_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Damage_Tab_Designer.py
index dd7e4a64c..cfb70c997 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Damage_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Damage_Tab_Designer.py
@@ -1,7 +1,7 @@
"""Created on Fri Oct 28 12:50:24 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
import pickle
@@ -14,19 +14,19 @@
from .Scenario_Dialog_Designer import Scenario_Dialog_Designer
-class Damage_Tab_Designer:
+class Damage_Tab_Designer: # noqa: D101
def __init__(self):
# self.pipe_damage_model = {"CI":{"alpha":-0.0038, "beta":0.1096, "gamma":0.0196, "a":2, "b":1 }, "DI":{"alpha":-0.0038, "beta":0.05, "gamma":0.04, "a":2, "b":1 } }
# self.node_damage_model = {'a':0.0036, 'aa':1, 'b':0, 'bb':0, 'c':-0.877, 'cc':1, 'd':0, 'dd':0, 'e':0.0248, 'ee1':1, 'ee2':1, 'f':0, 'ff1':0, 'ff2':0, "damage_node_model": "equal_diameter_emitter"}
- """These are variables that are shared between ui and settings."""
+ """These are variables that are shared between ui and settings.""" # noqa: D401
self.setDamageSettings(self.settings, self.scenario_list)
"""
Reassignment of shared variables.
"""
- self.damage_input_directory = os.getcwd()
- self.current_xlsx_directory = os.getcwd()
- if self.scenario_list == None:
+ self.damage_input_directory = os.getcwd() # noqa: PTH109
+ self.current_xlsx_directory = os.getcwd() # noqa: PTH109
+ if self.scenario_list == None: # noqa: E711
self.scenario_list = pd.DataFrame(
columns=[
'Scenario Name',
@@ -65,7 +65,7 @@ def __init__(self):
self.file_type_excel_radio.toggled.connect(self.fileTypeChanged)
self.file_type_pickle_radio.toggled.connect(self.fileTypeChanged)
- def getDamageSettings(self):
+ def getDamageSettings(self): # noqa: N802, D102
if len(self.scenario_list) < 1:
self.errorMSG('REWET', 'Damage scenario list is empty.')
return False
@@ -84,25 +84,25 @@ def getDamageSettings(self):
# self.scenario_list -- already set
return True
- def setDamageUI(self):
+ def setDamageUI(self): # noqa: N802, D102
self.damage_direcotry_line.setText(self.damage_input_directory)
self.clearScnearioTable()
self.populateScenarioTable()
- def setDamageSettings(self, settings, scenario_list):
+ def setDamageSettings(self, settings, scenario_list): # noqa: N802, D102
self.pipe_damage_model = settings.scenario['pipe_damage_model']
self.node_damage_model = settings.scenario['node_damage_model']
self.pipe_damage_input_method = settings.scenario['Pipe_damage_input_method']
self.damage_input_directory = settings.process['pipe_damage_file_directory']
self.scenario_list = scenario_list
- def addNewScenarioByButton(self):
+ def addNewScenarioByButton(self): # noqa: N802, D102
new_scenario_dialoge = Scenario_Dialog_Designer()
error = True
while error:
error = False
- return_value = new_scenario_dialoge._window.exec_()
+ return_value = new_scenario_dialoge._window.exec_() # noqa: SLF001
if return_value == 0:
return
@@ -163,13 +163,13 @@ def addNewScenarioByButton(self):
self.scneraio_validated = False
self.damage_pipe_model_reviewed = False
- def fileTypeChanged(self, checked):
+ def fileTypeChanged(self, checked): # noqa: ARG002, N802, D102
if self.file_type_excel_radio.isChecked():
self.pipe_damage_input_method = 'excel'
else:
self.pipe_damage_input_method = 'pickle'
- def removeScenarioByButton(self):
+ def removeScenarioByButton(self): # noqa: N802, D102
items = self.scenario_table.selectedItems()
if len(items) < 1:
return
@@ -193,7 +193,7 @@ def removeScenarioByButton(self):
self.scneraio_validated = False
self.damage_pipe_model_reviewed = False
- def loadScenarioByButton(self):
+ def loadScenarioByButton(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getOpenFileName(
self.asli_MainWindow,
'Open file',
@@ -216,7 +216,7 @@ def loadScenarioByButton(self):
self.scneraio_validated = False
self.damage_pipe_model_reviewed = False
- def saveScenarioByButton(self):
+ def saveScenarioByButton(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getSaveFileName(
self.asli_MainWindow,
'Save file',
@@ -228,7 +228,7 @@ def saveScenarioByButton(self):
self.scenario_list.to_excel(file[0])
- def validateScenarioByButton(self):
+ def validateScenarioByButton(self): # noqa: C901, N802, D102
self.status_text.setText('Validating Damage Scenarios')
if_validate_successful = True
text_output = ''
@@ -236,38 +236,38 @@ def validateScenarioByButton(self):
all_pipe_material = set()
- damage_pipe_not_exist_List = []
- damage_nodal_not_exist_List = []
- damage_pump_not_exist_List = []
- damage_tank_not_exist_List = []
+ damage_pipe_not_exist_List = [] # noqa: N806
+ damage_nodal_not_exist_List = [] # noqa: N806
+ damage_pump_not_exist_List = [] # noqa: N806
+ damage_tank_not_exist_List = [] # noqa: N806
- for index, row in scneario_list.iterrows():
+ for index, row in scneario_list.iterrows(): # noqa: B007
damage_pipe_name = row['Pipe Damage']
- damage_pipe_addr = os.path.join(
+ damage_pipe_addr = os.path.join( # noqa: PTH118
self.damage_input_directory, damage_pipe_name
)
- if not os.path.exists(damage_pipe_addr):
+ if not os.path.exists(damage_pipe_addr): # noqa: PTH110
damage_pipe_not_exist_List.append(damage_pipe_name)
damage_node_name = row['Nodal Damage']
- damage_nodal_addr = os.path.join(
+ damage_nodal_addr = os.path.join( # noqa: PTH118
self.damage_input_directory, damage_node_name
)
- if not os.path.exists(damage_nodal_addr):
+ if not os.path.exists(damage_nodal_addr): # noqa: PTH110
damage_nodal_not_exist_List.append(damage_node_name)
damage_pump_name = row['Pump Damage']
- damage_pump_addr = os.path.join(
+ damage_pump_addr = os.path.join( # noqa: PTH118
self.damage_input_directory, damage_pump_name
)
- if not os.path.exists(damage_pump_addr):
+ if not os.path.exists(damage_pump_addr): # noqa: PTH110
damage_pump_not_exist_List.append(damage_pump_name)
damage_tank_name = row['Tank Damage']
- damage_tank_addr = os.path.join(
+ damage_tank_addr = os.path.join( # noqa: PTH118
self.damage_input_directory, damage_tank_name
)
- if not os.path.exists(damage_tank_addr):
+ if not os.path.exists(damage_tank_addr): # noqa: PTH110
damage_tank_not_exist_List.append(damage_tank_name)
if len(damage_pipe_not_exist_List) > 0:
@@ -300,21 +300,21 @@ def validateScenarioByButton(self):
if_validate_successful = False
try:
- must_have_pipe_columns = set(
+ must_have_pipe_columns = set( # noqa: C405
['time', 'pipe_id', 'damage_loc', 'type', 'Material']
)
- for index, row in scneario_list.iterrows():
+ for index, row in scneario_list.iterrows(): # noqa: B007
damage_pipe_name = row['Pipe Damage']
if self.pipe_damage_input_method == 'excel':
pipe_damage = pd.read_excel(
- os.path.join(self.damage_input_directory, damage_pipe_name)
+ os.path.join(self.damage_input_directory, damage_pipe_name) # noqa: PTH118
)
elif self.pipe_damage_input_method == 'pickle':
- with open(
- os.path.join(self.damage_input_directory, damage_pipe_name),
+ with open( # noqa: PTH123
+ os.path.join(self.damage_input_directory, damage_pipe_name), # noqa: PTH118
'rb',
) as f:
- pipe_damage = pickle.load(f)
+ pipe_damage = pickle.load(f) # noqa: S301
index_list = pipe_damage.index
pipe_damage = pd.DataFrame.from_dict(pipe_damage.to_list())
pipe_damage.loc[:, 'time'] = index_list
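
C405 flags `set([...])`; a set literal builds the same set without the throwaway list:

    must_have_pipe_columns = {'time', 'pipe_id', 'damage_loc', 'type', 'Material'}
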
@@ -344,21 +344,21 @@ def validateScenarioByButton(self):
new_material_set = set(pipe_damage['Material'].unique())
all_pipe_material = all_pipe_material.union(new_material_set)
- must_have_node_columns = set(
+ must_have_node_columns = set( # noqa: C405
['time', 'node_name', 'Number_of_damages', 'node_Pipe_Length']
)
- for index, row in scneario_list.iterrows():
+ for index, row in scneario_list.iterrows(): # noqa: B007
damage_node_name = row['Nodal Damage']
if self.pipe_damage_input_method == 'excel':
node_damage = pd.read_excel(
- os.path.join(self.damage_input_directory, damage_node_name)
+ os.path.join(self.damage_input_directory, damage_node_name) # noqa: PTH118
)
elif self.pipe_damage_input_method == 'pickle':
- with open(
- os.path.join(self.damage_input_directory, damage_node_name),
+ with open( # noqa: PTH123
+ os.path.join(self.damage_input_directory, damage_node_name), # noqa: PTH118
'rb',
) as f:
- node_damage = pickle.load(f)
+ node_damage = pickle.load(f) # noqa: S301
index_list = node_damage.index
node_damage = pd.DataFrame.from_dict(node_damage.to_list())
node_damage.loc[:, 'time'] = index_list
@@ -380,19 +380,19 @@ def validateScenarioByButton(self):
)
if_validate_successful = False
- must_have_pump_columns = set(['time', 'Pump_ID', 'Restore_time'])
- for index, row in scneario_list.iterrows():
+ must_have_pump_columns = set(['time', 'Pump_ID', 'Restore_time']) # noqa: C405
+ for index, row in scneario_list.iterrows(): # noqa: B007
damage_pump_name = row['Pump Damage']
if self.pipe_damage_input_method == 'excel':
pump_damage = pd.read_excel(
- os.path.join(self.damage_input_directory, damage_pump_name)
+ os.path.join(self.damage_input_directory, damage_pump_name) # noqa: PTH118
)
elif self.pipe_damage_input_method == 'pickle':
- with open(
- os.path.join(self.damage_input_directory, damage_pump_name),
+ with open( # noqa: PTH123
+ os.path.join(self.damage_input_directory, damage_pump_name), # noqa: PTH118
'rb',
) as f:
- pump_damage = pickle.load(f)
+ pump_damage = pickle.load(f) # noqa: S301
pump_damage = pump_damage.reset_index(drop=False)
available_columns = set(pump_damage.columns)
not_available_columns = (
@@ -415,19 +415,19 @@ def validateScenarioByButton(self):
)
if_validate_successful = False
- must_have_tank_columns = set(['time', 'Tank_ID', 'Restore_time'])
- for index, row in scneario_list.iterrows():
+ must_have_tank_columns = set(['time', 'Tank_ID', 'Restore_time']) # noqa: C405
+ for index, row in scneario_list.iterrows(): # noqa: B007
damage_tank_name = row['Tank Damage']
if self.pipe_damage_input_method == 'excel':
tank_damage = pd.read_excel(
- os.path.join(self.damage_input_directory, damage_tank_name)
+ os.path.join(self.damage_input_directory, damage_tank_name) # noqa: PTH118
)
elif self.pipe_damage_input_method == 'pickle':
- with open(
- os.path.join(self.damage_input_directory, damage_tank_name),
+ with open( # noqa: PTH123
+ os.path.join(self.damage_input_directory, damage_tank_name), # noqa: PTH118
'rb',
) as f:
- tank_damage = pickle.load(f)
+ tank_damage = pickle.load(f) # noqa: S301
tank_damage = tank_damage.reset_index(drop=False)
available_columns = set(tank_damage.columns)
not_available_columns = (
@@ -446,15 +446,15 @@ def validateScenarioByButton(self):
+ '\n'
)
if_validate_successful = False
- except Exception as exp:
- raise exp
+ except Exception as exp: # noqa: TRY302
+ raise exp # noqa: TRY201
if_validate_successful = False
text_output += (
'An error happened. File type might be wrong in addition to other problems. More information:\n'
+ repr(exp)
)
- if if_validate_successful == True:
+ if if_validate_successful == True: # noqa: E712
text_output += 'Damage Scenario List Validated Successfully'
not_defined_materials = all_pipe_material - set(
self.pipe_damage_model.keys()
@@ -477,29 +477,29 @@ def validateScenarioByButton(self):
self.status_text.setText(text_output)
- def pipeDamageSettingByButton(self):
- if self.scneraio_validated == False:
+ def pipeDamageSettingByButton(self): # noqa: N802, D102
+ if self.scneraio_validated == False: # noqa: E712
self.errorMSG(
'REWET',
'You must validate damage scenarios successfully before reviewing pipe damage models.',
)
return
pipe_designer = Pipe_Damage_Model_Designer(self.pipe_damage_model)
- return_value = pipe_designer._window.exec_()
+ return_value = pipe_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.pipe_damage_model = pipe_designer.pipe_damage_model
self.damage_pipe_model_reviewed = True
- def nodeDamageSettingByButton(self):
+ def nodeDamageSettingByButton(self): # noqa: N802, D102
node_designer = Node_Damage_Model_Designer(self.node_damage_model)
- return_value = node_designer._window.exec_()
+ return_value = node_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.node_damage_model = node_designer.node_damage_model
- def browseDamageDirectoryByButton(self):
+ def browseDamageDirectoryByButton(self): # noqa: N802, D102
directory = QtWidgets.QFileDialog.getExistingDirectory(
self.asli_MainWindow, 'Select Directory', self.current_xlsx_directory
)
@@ -509,7 +509,7 @@ def browseDamageDirectoryByButton(self):
self.damage_input_directory = directory
self.damage_direcotry_line.setText(directory)
- def getScnearioListFromXLSX(self, scenario_file_addr):
+ def getScnearioListFromXLSX(self, scenario_file_addr): # noqa: N802, D102
scn_list = pd.read_excel(scenario_file_addr)
must_be_headers = [
@@ -529,14 +529,14 @@ def getScnearioListFromXLSX(self, scenario_file_addr):
+ repr(not_available_headers)
)
return None
- else:
+ else: # noqa: RET505
self.status_text.setText('Opened file successfully.')
scn_list = scn_list[must_be_headers]
- return scn_list
+ return scn_list # noqa: RET504
- def populateScenarioTable(self):
- for index, row in self.scenario_list.iterrows():
+ def populateScenarioTable(self): # noqa: N802, D102
+ for index, row in self.scenario_list.iterrows(): # noqa: B007
number_of_rows = self.scenario_table.rowCount()
self.scenario_table.insertRow(number_of_rows)
@@ -573,6 +573,6 @@ def populateScenarioTable(self):
self.scenario_table.setItem(number_of_rows, 4, tank_damage_item)
self.scenario_table.setItem(number_of_rows, 5, probability_item)
- def clearScnearioTable(self):
- for i in range(self.scenario_table.rowCount()):
+ def clearScnearioTable(self): # noqa: N802, D102
+ for i in range(self.scenario_table.rowCount()): # noqa: B007
self.scenario_table.removeRow(0)
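
On the B007 suppression in `clearScnearioTable`: renaming the unused counter to `_` documents that only the row count matters, and rows are removed at index 0 because the table shrinks as the loop runs (with `table` standing in for the widget):

    for _ in range(table.rowCount()):
        table.removeRow(0)
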
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Hydraulic_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Hydraulic_Tab_Designer.py
index 37115d865..2c4b8e8ac 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Hydraulic_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Hydraulic_Tab_Designer.py
@@ -1,16 +1,16 @@
"""Created on Thu Oct 27 19:19:02 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
from PyQt5 import QtGui, QtWidgets
-class Hydraulic_Tab_Designer:
+class Hydraulic_Tab_Designer: # noqa: D101
def __init__(self):
- """These are variables that are shared between ui and settings."""
+ """These are variables that are shared between ui and settings.""" # noqa: D401
self.setHydraulicSettings(self.settings)
"""
@@ -41,9 +41,9 @@ def __init__(self):
self.hydraulic_time_step_line.textEdited.connect(
self.hydraulicTimeStepValidatorHelper
)
- self.current_inp_directory = os.getcwd()
+ self.current_inp_directory = os.getcwd() # noqa: PTH109
- def getHydraulicSettings(self):
+ def getHydraulicSettings(self): # noqa: N802, D102
if self.wn_inp == '':
self.errorMSG(
'REWET', 'Water distribution network File must be provided'
@@ -65,7 +65,7 @@ def getHydraulicSettings(self):
return True
- def setHydraulicUI(self):
+ def setHydraulicUI(self): # noqa: N802, D102
self.wdn_addr_line.setText(self.wn_inp)
self.last_demand_ratio_value = self.demand_ratio
self.demand_ratio_line.setText(str(self.last_demand_ratio_value))
@@ -81,7 +81,7 @@ def setHydraulicUI(self):
self.required_pressure_line.setText(str(self.required_pressure))
self.hydraulic_time_step_line.setText(str(self.hydraulic_time_step))
- def setHydraulicSettings(self, settings):
+ def setHydraulicSettings(self, settings): # noqa: N802, D102
self.wn_inp = settings.process['WN_INP']
self.demand_ratio = settings.process['demand_ratio']
self.solver = settings.process['solver_type']
@@ -89,24 +89,24 @@ def setHydraulicSettings(self, settings):
self.required_pressure = settings.scenario['required_pressure']
self.hydraulic_time_step = settings.scenario['hydraulic_time_step']
- def demandRatioValidatorHelper(self, x):
+ def demandRatioValidatorHelper(self, x): # noqa: N802, D102
if float(x) > 1:
self.demand_ratio_line.setText(self.last_demand_ratio_value)
else:
self.last_demand_ratio_value = x
# print(x)
- def hydraulicTimeStepValidatorHelper(self, x):
+ def hydraulicTimeStepValidatorHelper(self, x): # noqa: ARG002, N802, D102
try:
hydraulic_time_step = int(float(self.hydraulic_time_step_line.text()))
- except:
+ except: # noqa: E722
hydraulic_time_step = 0
simulation_time_step = int(float(self.simulation_time_step_line.text()))
if hydraulic_time_step > simulation_time_step:
self.hydraulic_time_step_line.setText(str(simulation_time_step))
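
The bare `except` above only needs to cover a non-numeric text field, so catching the precise exceptions would retire the E722 suppression; a hypothetical helper:

    def _parse_step(text, fallback=0):
        # int(float(...)) accepts '600' and '600.0' alike.
        try:
            return int(float(text))
        except (ValueError, TypeError):
            return fallback
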
- def wdnFileBroweserClicked(self):
+ def wdnFileBroweserClicked(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getOpenFileName(
self.asli_MainWindow,
'Open file',
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Input_IO.py b/modules/systemPerformance/REWET/REWET/GUI/Input_IO.py
index a43970896..bbdb83776 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Input_IO.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Input_IO.py
@@ -1,93 +1,93 @@
-import os
+import os # noqa: N999, D100
import pickle
import pandas as pd
# Read files From Pickle #####################
-def read_pipe_damage_seperate_pickle_file(directory, all_damages_file_name):
- file_dest = os.path.join(directory, all_damages_file_name)
- with open(file_dest, 'rb') as f:
- _all_damages = pickle.load(f)
+def read_pipe_damage_seperate_pickle_file(directory, all_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, all_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _all_damages = pickle.load(f) # noqa: S301
- return _all_damages
+ return _all_damages # noqa: RET504
-def read_node_damage_seperate_pickle_file(directory, all_damages_file_name):
- file_dest = os.path.join(directory, all_damages_file_name)
- with open(file_dest, 'rb') as f:
- _node_damages = pickle.load(f)
+def read_node_damage_seperate_pickle_file(directory, all_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, all_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _node_damages = pickle.load(f) # noqa: S301
- return _node_damages
+ return _node_damages # noqa: RET504
-def read_tank_damage_seperate_pickle_file(directory, tank_damages_file_name):
- file_dest = os.path.join(directory, tank_damages_file_name)
- with open(file_dest, 'rb') as f:
- _tank_damages = pickle.load(f)
+def read_tank_damage_seperate_pickle_file(directory, tank_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, tank_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _tank_damages = pickle.load(f) # noqa: S301
- return _tank_damages
+ return _tank_damages # noqa: RET504
-def read_pump_damage_seperate_pickle_file(directory, pump_damages_file_name):
- file_dest = os.path.join(directory, pump_damages_file_name)
- with open(file_dest, 'rb') as f:
- _pump_damages = pickle.load(f)
+def read_pump_damage_seperate_pickle_file(directory, pump_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, pump_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _pump_damages = pickle.load(f) # noqa: S301
- return _pump_damages
+ return _pump_damages # noqa: RET504
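
The four pickle readers above share one body and the same three suppressions. A hypothetical consolidation, keeping the S301 caveat front and center (pickle can execute arbitrary code on load, so damage files must come from a trusted source) and using pathlib to retire PTH118/PTH123:

    import pickle
    from pathlib import Path

    def _read_damage_pickle(directory, file_name):
        with (Path(directory) / file_name).open('rb') as f:
            return pickle.load(f)  # trusted input only
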
# Read files From Excel #####################
-def read_pipe_damage_seperate_EXCEL_file(directory, pipe_damages_file_name):
+def read_pipe_damage_seperate_EXCEL_file(directory, pipe_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, pipe_damages_file_name)
+ file_dest = os.path.join(directory, pipe_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
ss.sort_values(
['pipe_id', 'damage_time', 'damage_loc'],
ascending=[True, True, False],
ignore_index=True,
- inplace=True,
+ inplace=True, # noqa: PD002
)
unique_time = ss.groupby(['pipe_id']).time.unique()
if 1 in [
0 if len(i) <= 1 else 1 for i in unique_time
]: # checks if there are any pipe id with more than two unique time values
- raise ValueError(
- 'All damage location for one pipe should happen at the same time'
+ raise ValueError( # noqa: TRY003
+ 'All damage location for one pipe should happen at the same time' # noqa: EM101
)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.pipe_id = ss.pipe_id.astype(str)
return pd.Series(ss.to_dict('records'), index=ss.index)
-def read_node_damage_seperate_EXCEL_file(directory, node_damages_file_name):
+def read_node_damage_seperate_EXCEL_file(directory, node_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, node_damages_file_name)
+ file_dest = os.path.join(directory, node_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.node_name = ss.node_name.astype(str)
return pd.Series(ss.to_dict('records'), index=ss.index)
-def read_tank_damage_seperate_EXCEL_file(directory, tank_damages_file_name):
+def read_tank_damage_seperate_EXCEL_file(directory, tank_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, tank_damages_file_name)
+ file_dest = os.path.join(directory, tank_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
# ss.set_index('Tank_ID', inplace=True)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.Tank_ID = ss.Tank_ID.astype(str)
# ss = ss['Tank_ID']
return ss
-def read_pump_damage_seperate_EXCEL_file(directory, pump_damages_file_name):
+def read_pump_damage_seperate_EXCEL_file(directory, pump_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, pump_damages_file_name)
+ file_dest = os.path.join(directory, pump_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.Pump_ID = ss.Pump_ID.astype(str)
return ss
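
PD002 flags `inplace=True`; the assignment form returns a new frame and reads the same (a sketch against the `ss` frame from the pipe reader above):

    ss = ss.sort_values(['pipe_id', 'damage_time', 'damage_loc'],
                        ascending=[True, True, False], ignore_index=True)
    ss = ss.set_index('time')
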
@@ -95,14 +95,14 @@ def read_pump_damage_seperate_EXCEL_file(directory, pump_damages_file_name):
# Save Results #####################
-def save_single(settings, result, name, restoration_data):
+def save_single(settings, result, name, restoration_data): # noqa: D103
result_file_directory = settings.process['result_directory']
result_name = name + '.res'
settings_name = name + '.xlsx'
- file_dest = os.path.join(result_file_directory, result_name)
- print('Saving: ' + str(file_dest))
- with open(file_dest, 'wb') as f:
+ file_dest = os.path.join(result_file_directory, result_name) # noqa: PTH118
+ print('Saving: ' + str(file_dest)) # noqa: T201
+ with open(file_dest, 'wb') as f: # noqa: PTH123
pickle.dump(result, f)
process_set = pd.Series(settings.process.settings)
@@ -111,14 +111,14 @@ def save_single(settings, result, name, restoration_data):
process_set.to_list() + scenario_set.to_list(),
index=process_set.index.to_list() + scenario_set.index.to_list(),
)
- file_dest = os.path.join(result_file_directory, settings_name)
+ file_dest = os.path.join(result_file_directory, settings_name) # noqa: PTH118
_set.to_excel(file_dest)
if settings.process['dmg_rst_data_save']:
# file_dest = os.path.join(result_file_directory, 'restoration_file.pkl')
# rest_data_out = pd.DataFrame.from_dict(restoration_data)
# rest_data_out.to_pickle(file_dest)
- file_dest = os.path.join(result_file_directory, name + '_registry.pkl')
- print('Saving: ' + str(file_dest))
- with open(file_dest, 'wb') as f:
+ file_dest = os.path.join(result_file_directory, name + '_registry.pkl') # noqa: PTH118
+ print('Saving: ' + str(file_dest)) # noqa: T201
+ with open(file_dest, 'wb') as f: # noqa: PTH123
pickle.dump(restoration_data, f)
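save_single likewise suppresses PTH118/PTH123/T201 rather than switching to pathlib and logging; a sketch of that alternative is below, with a hypothetical function name. Two related notes on the pickle hunks above: S301 flags pickle.load because unpickling can execute arbitrary code, so the suppression is only safe for files this tool wrote itself, and RET504 on the pump-damage reader would be satisfied by returning pickle.load(f) directly instead of assigning _pump_damages first.

import logging
import pickle
from pathlib import Path

logger = logging.getLogger(__name__)


def save_result_ref(result_file_directory, name, result):
    # PTH118/PTH123: pathlib both joins the path and opens the file
    file_dest = Path(result_file_directory) / (name + '.res')
    logger.info('Saving: %s', file_dest)  # T201: logging instead of print
    with file_dest.open('wb') as f:
        pickle.dump(result, f)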
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Designer.py
index 263b1613b..84e70e8ac 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Designer.py
@@ -1,14 +1,14 @@
"""Created on Wed Nov 2 13:25:40 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from PyQt5 import QtWidgets
from .Main_Help_Window import Ui_Main_Help_Window
-class Main_Help_Designer(Ui_Main_Help_Window):
+class Main_Help_Designer(Ui_Main_Help_Window): # noqa: D101
def __init__(self):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Window.py
index 2554ef566..5c4a8c5f6 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Main_Help_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Main_Help_Window.ui'
+# Form implementation generated from reading ui file 'Main_Help_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtGui, QtWidgets
-class Ui_Main_Help_Window:
- def setupUi(self, Main_Help_Window):
+class Ui_Main_Help_Window: # noqa: D101
+ def setupUi(self, Main_Help_Window): # noqa: N802, N803, D102
Main_Help_Window.setObjectName('Main_Help_Window')
Main_Help_Window.resize(680, 320)
Main_Help_Window.setMinimumSize(QtCore.QSize(680, 320))
@@ -31,11 +31,11 @@ def setupUi(self, Main_Help_Window):
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
self.gridLayout_4.setObjectName('gridLayout_4')
- spacerItem = QtWidgets.QSpacerItem(
+ spacerItem = QtWidgets.QSpacerItem( # noqa: N806
50, 0, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_4.addItem(spacerItem, 1, 1, 1, 1)
- spacerItem1 = QtWidgets.QSpacerItem(
+ spacerItem1 = QtWidgets.QSpacerItem( # noqa: N806
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_4.addItem(spacerItem1, 0, 0, 1, 1)
@@ -44,14 +44,14 @@ def setupUi(self, Main_Help_Window):
self.label_4.setPixmap(QtGui.QPixmap(':/resources/resources/both_logos.jpg'))
self.label_4.setObjectName('label_4')
self.gridLayout_4.addWidget(self.label_4, 0, 1, 1, 1)
- spacerItem2 = QtWidgets.QSpacerItem(
+ spacerItem2 = QtWidgets.QSpacerItem( # noqa: N806
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.gridLayout_4.addItem(spacerItem2, 0, 2, 1, 1)
self.main_layout.addLayout(self.gridLayout_4)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
- spacerItem3 = QtWidgets.QSpacerItem(
+ spacerItem3 = QtWidgets.QSpacerItem( # noqa: N806
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_2.addItem(spacerItem3)
@@ -65,7 +65,7 @@ def setupUi(self, Main_Help_Window):
self.retranslateUi(Main_Help_Window)
QtCore.QMetaObject.connectSlotsByName(Main_Help_Window)
- def retranslateUi(self, Main_Help_Window):
+ def retranslateUi(self, Main_Help_Window): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Main_Help_Window.setWindowTitle(_translate('Main_Help_Window', 'Help'))
self.label.setText(
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Map_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Map_Designer.py
index a6d63f9a6..d5b0b00a5 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Map_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Map_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Nov 10 18:29:50 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import geopandas as gpd
import matplotlib.pyplot as plt
@@ -90,14 +90,14 @@
cmap = plt.cm.RdYlGn
-class Time_Unit_Combo(QtWidgets.QComboBox):
+class Time_Unit_Combo(QtWidgets.QComboBox): # noqa: D101
def __init__(self):
super().__init__()
time_units = ['second', 'hour', 'day']
self.addItems(time_units)
- def changeMapTimeUnit(self, raw_time_map, value_columns_name):
+ def changeMapTimeUnit(self, raw_time_map, value_columns_name): # noqa: N802, D102
time_justified_map = raw_time_map.copy()
time_unit = self.currentText()
@@ -107,7 +107,7 @@ def changeMapTimeUnit(self, raw_time_map, value_columns_name):
if time_unit == 'second':
return raw_time_map.copy()
- elif time_unit == 'hour':
+ elif time_unit == 'hour': # noqa: RET505
data = data / 3600
elif time_unit == 'day':
data = data / 3600 / 24
@@ -119,13 +119,13 @@ def changeMapTimeUnit(self, raw_time_map, value_columns_name):
return time_justified_map
-class Yes_No_Combo(QtWidgets.QComboBox):
+class Yes_No_Combo(QtWidgets.QComboBox): # noqa: D101
def __init__(self):
super().__init__()
self.addItems(['No', 'Yes'])
-class Map_Designer:
+class Map_Designer: # noqa: D101
def __init__(self):
self.current_raw_map = None
self.current_map = None
@@ -183,24 +183,24 @@ def __init__(self):
self.initializeMap()
- def initializeMap(self):
+ def initializeMap(self): # noqa: N802, D102
self.setMapAllScenarios(True)
self.map_all_scenarios_checkbox.setChecked(True)
self.map_scenario_combo.clear()
self.map_scenario_combo.addItems(self.result_scenarios)
# self.current_map_data = None
- def symbologyByButton(self):
+ def symbologyByButton(self): # noqa: N802, D102
sym = Symbology_Designer(
self.symbology, self.plotted_map, self.map_value_columns_name
)
- val = sym._window.exec_()
+ val = sym._window.exec_() # noqa: SLF001
if val == 1:
self.symbology = sym.sym
self.plotMap(self.map_value_columns_name)
- def majorTickSet(self):
+ def majorTickSet(self): # noqa: N802, D102
major_tick_fond_size = self.major_tick_size_line.text()
major_tick_fond_size = float(major_tick_fond_size)
@@ -209,7 +209,7 @@ def majorTickSet(self):
)
self.mpl_map.canvas.fig.canvas.draw_idle()
- def openSubsituteLayerWindow(self):
+ def openSubsituteLayerWindow(self): # noqa: N802, D102
demand_node_temporary_layer = (
self.project_result.createGeopandasPointDataFrameForNodes()
)
@@ -219,7 +219,7 @@ def openSubsituteLayerWindow(self):
self.iUse_substitute_layer,
demand_node_temporary_layer,
)
- val = sub_layer._window.exec_()
+ val = sub_layer._window.exec_() # noqa: SLF001
if val == 1:
self.subsitute_layer_addr = sub_layer.subsitute_layer_addr
@@ -227,7 +227,7 @@ def openSubsituteLayerWindow(self):
self.iUse_substitute_layer = sub_layer.iUse_substitute_layer
self.plotMap(self.map_value_columns_name)
- def annotationRadiusChanegd(self):
+ def annotationRadiusChanegd(self): # noqa: N802, D102
annotation_radius = self.annotation_radius_line.text()
self.annotation_map = self.plotted_map.copy(deep=True)
if annotation_radius == '':
@@ -237,43 +237,43 @@ def annotationRadiusChanegd(self):
for ind, val in self.current_map.geometry.iteritems():
self.annotation_map.geometry.loc[ind] = val.buffer(annotation_radius)
- def AnnotationCheckboxChanged(self, state):
+ def AnnotationCheckboxChanged(self, state): # noqa: N802, D102
if state == 0:
self.annotation_event_combo.setEnabled(False)
self.annotation_radius_line.setEnabled(False)
self.anottation_type = 'None'
self.annot.set_visible(False)
- elif state == 2:
+ elif state == 2: # noqa: PLR2004
self.annotation_event_combo.setEnabled(True)
self.annotation_radius_line.setEnabled(True)
self.getAnnotationtype()
- def mapAllScenarioCheckboxChanged(self, state):
+ def mapAllScenarioCheckboxChanged(self, state): # noqa: N802, D102
if state == 0:
self.setMapAllScenarios(False)
- elif state == 2:
+ elif state == 2: # noqa: PLR2004
self.setMapAllScenarios(True)
- def getAnnotationtype(self, text=None):
+ def getAnnotationtype(self, text=None): # noqa: ARG002, N802, D102
combo_value = self.annotation_event_combo.currentText()
- if combo_value == 'Mouse hover' or combo_value == 'Mouse click':
+ if combo_value == 'Mouse hover' or combo_value == 'Mouse click': # noqa: PLR1714
self.anottation_type = combo_value
else:
raise ValueError('unknown annotation type: ' + repr(combo_value))
- def mouseHovered(self, event):
+ def mouseHovered(self, event): # noqa: N802, D102
if self.anottation_type != 'Mouse hover':
return
- if type(self.current_map) == type(None):
+ if type(self.current_map) == type(None): # noqa: E721
return
self.putAnnotation(event)
- def mouseClicked(self, event):
+ def mouseClicked(self, event): # noqa: N802, D102
if self.anottation_type != 'Mouse click':
return
- if type(self.current_map) == type(None):
+ if type(self.current_map) == type(None): # noqa: E721
return
if event.button != 1:
@@ -281,14 +281,14 @@ def mouseClicked(self, event):
self.putAnnotation(event)
- def putAnnotation(self, event):
+ def putAnnotation(self, event): # noqa: N802, D102
vis = self.annot.get_visible()
if event.inaxes == self.mpl_map.canvas.ax:
# print((event.xdata, event.ydata) )
mouse_point = Point(event.xdata, event.ydata)
s = self.annotation_map.geometry.contains(mouse_point)
- s_index_list = s[s == True].index
+ s_index_list = s[s == True].index # noqa: E712
if len(s_index_list) >= 1:
cont = True
@@ -299,7 +299,7 @@ def putAnnotation(self, event):
if cont:
# print(len(s_index_list))
data = self.annotation_map.loc[s_index, self.map_value_columns_name]
- if type(data) == pd.core.series.Series:
+ if type(data) == pd.core.series.Series: # noqa: E721
data = data.iloc[0]
text = repr(data)
self.update_annot(text, event)
@@ -309,17 +309,17 @@ def putAnnotation(self, event):
self.annot.set_visible(False)
self.mpl_map.canvas.fig.canvas.draw_idle()
- def update_annot(self, text, event):
+ def update_annot(self, text, event): # noqa: D102
self.annot.xy = (event.xdata, event.ydata)
self.annot.set_text(text)
self.annot.get_bbox_patch().set_facecolor(cmap(norm(1)))
self.annot.get_bbox_patch().set_alpha(0.4)
- def clearMapPlot(self):
+ def clearMapPlot(self): # noqa: N802, D102
self.mpl_map.canvas.ax.cla()
- def plotMap(self, value_columns_name):
+ def plotMap(self, value_columns_name): # noqa: N802, D102
self.clearMapPlot()
self.mpl_map.canvas.ax.clear()
# for ind, val in self.current_map.geometry.iteritems():
@@ -333,12 +333,12 @@ def plotMap(self, value_columns_name):
xy=(0, 0),
xytext=(20, 20),
textcoords='offset points',
- bbox=dict(boxstyle='round', fc='w'),
- arrowprops=dict(arrowstyle='->'),
+ bbox=dict(boxstyle='round', fc='w'), # noqa: C408
+ arrowprops=dict(arrowstyle='->'), # noqa: C408
)
self.annot.set_visible(False)
- if self.iUse_substitute_layer == True:
+ if self.iUse_substitute_layer == True: # noqa: E712
data = data.set_crs(crs=self.subsitute_layer.crs)
joined_map = gpd.sjoin(self.subsitute_layer, data)
# joined_map.plot(ax=self.mpl_map.canvas.ax, column=value_columns_name, cmap="Blues", legend=True)
@@ -367,7 +367,7 @@ def plotMap(self, value_columns_name):
self.mpl_map.canvas.draw()
self.mpl_map.canvas.fig.tight_layout()
- def prepareForLegend(self, data, value_columns_name):
+ def prepareForLegend(self, data, value_columns_name): # noqa: N802, D102
return data.copy(deep=True)
data = data.copy(deep=True)
min_value = data[value_columns_name].min()
@@ -391,14 +391,14 @@ def prepareForLegend(self, data, value_columns_name):
return data
- def setMapAllScenarios(self, flag):
- if flag == True:
+ def setMapAllScenarios(self, flag): # noqa: N802, D102
+ if flag == True: # noqa: E712
self.map_all_scenarios_checkbox.setChecked(True)
self.map_scenario_combo.setEnabled(False)
self.map_type_combo.clear()
self.map_type_combo.addItems(multi_scenario_map_options)
self.clearMapPlot()
- elif flag == False:
+ elif flag == False: # noqa: E712
self.map_all_scenarios_checkbox.setChecked(False)
self.map_scenario_combo.setEnabled(True)
self.map_type_combo.clear()
@@ -407,28 +407,28 @@ def setMapAllScenarios(self, flag):
else:
raise ValueError('Unknown flag: ' + repr(flag))
- def resultScenarioChanged(self, text):
+ def resultScenarioChanged(self, text): # noqa: N802, D102
self.map_result_current_scenario = text # self.map_scenario_combo.getText()
- def mapTypeChanegd(self, text):
- if self.project_result == None:
+ def mapTypeChanegd(self, text): # noqa: N802, D102
+ if self.project_result == None: # noqa: E711
return
self.current_map_type = text
self.setMapSettingBox(text)
self.calculateCurrentMap()
- def calculateCurrentMap(self):
+ def calculateCurrentMap(self): # noqa: C901, N802, D102
map_type = self.current_map_type
if map_type == 'Quantity Outage vs. Exceedance':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
exeedance_probability = self.map_settings_widgets['Ex. Prob.'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -456,15 +456,15 @@ def calculateCurrentMap(self):
self.plotMap(self.map_value_columns_name)
elif map_type == 'Delivery Outage vs. Exceedance':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
exeedance_probability = self.map_settings_widgets['Ex. Prob.'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -492,15 +492,15 @@ def calculateCurrentMap(self):
self.plotMap(self.map_value_columns_name)
elif map_type == 'Quantity Exceedance vs. Time':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
outage_time = self.map_settings_widgets['Outage Time'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -528,15 +528,15 @@ def calculateCurrentMap(self):
self.plotMap(self.map_value_columns_name)
elif map_type == 'Delivery Exceedance vs. Time':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
outage_time = self.map_settings_widgets['Outage Time'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -564,14 +564,14 @@ def calculateCurrentMap(self):
self.plotMap(self.map_value_columns_name)
elif map_type == 'Quantity Return':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -593,14 +593,14 @@ def calculateCurrentMap(self):
self.map_value_columns_name = value_column_label
elif map_type == 'Delivery Return':
- iConsider_leak = self.map_settings_widgets['LDN leak'].currentText()
+ iConsider_leak = self.map_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.map_settings_widgets['leak Criteria'].text()
time_window = self.map_settings_widgets['Time Window'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
leak_ratio = float(leak_ratio)
time_window = int(float(time_window))
@@ -624,7 +624,7 @@ def calculateCurrentMap(self):
elif map_type == 'SSI':
return
# self.current_map_data = (map_type, pd.DataFrame())
- iPopulation = self.map_settings_widgets['Population'].currentText()
+ iPopulation = self.map_settings_widgets['Population'].currentText() # noqa: N806
scn_name = self.map_scenario_combo.currentText()
self.current_raw_map = (
self.project_result.getSystemServiceabilityIndexMap(
@@ -638,13 +638,13 @@ def calculateCurrentMap(self):
elif map_type == '':
return
else:
- raise
+ raise # noqa: PLE0704
# self.annotation_map = self.current_raw_map.copy()
self.annotationRadiusChanegd()
- def setMapSettingBox(self, map_type):
- for i in range(self.map_settings_table.rowCount()):
+ def setMapSettingBox(self, map_type): # noqa: N802, D102
+ for i in range(self.map_settings_table.rowCount()): # noqa: B007
self.map_settings_table.removeRow(0)
if map_type in map_settings:
@@ -653,7 +653,7 @@ def setMapSettingBox(self, map_type):
pass
# raise ValueError("Unknown Map type: "+repr(map_type))
- def populateMapSettingsTable(self, settings_content):
+ def populateMapSettingsTable(self, settings_content): # noqa: C901, N802, D102
self.map_settings_widgets.clear()
vertical_header = []
cell_type_list = []
@@ -718,7 +718,7 @@ def populateMapSettingsTable(self, settings_content):
current_widget = QtWidgets.QLineEdit()
self.map_settings_table.setCellWidget(i, 0, current_widget)
current_widget.editingFinished.connect(self.mapSettingChanged)
- if validator_list[i] == None:
+ if validator_list[i] == None: # noqa: E711
current_widget.setValidator(
QtGui.QDoubleValidator(
0,
@@ -746,7 +746,7 @@ def populateMapSettingsTable(self, settings_content):
self.map_settings_table.setCellWidget(i, 0, current_widget)
current_widget.editingFinished.connect(self.mapSettingChanged)
- if validator_list[i] == None:
+ if validator_list[i] == None: # noqa: E711
current_widget.setValidator(
QtGui.QIntValidator(0, 3600 * 24 * 1000)
)
@@ -763,35 +763,35 @@ def populateMapSettingsTable(self, settings_content):
else:
raise ValueError(repr(cell_type))
- i += 1
+ i += 1 # noqa: SIM113
# for label in settings_content:
- def mapTimeSettingsChanged(self, x):
+ def mapTimeSettingsChanged(self, x): # noqa: ARG002, N802, D102
self.current_map = self.time_combo.changeMapTimeUnit(
self.current_raw_map, self.map_value_columns_name
)
self.plotMap(self.map_value_columns_name)
- def mapSettingChanged(self):
+ def mapSettingChanged(self): # noqa: N802, D102
if 'Population' in self.map_settings_widgets:
new_population_setting = self.map_settings_widgets[
'Population'
].currentText()
- if new_population_setting == 'Yes' and type(
- self.project_result._population_data
+ if new_population_setting == 'Yes' and type( # noqa: E721
+ self.project_result._population_data # noqa: SLF001
) == type(None):
self.errorMSG('Error', 'Population data is not loaded')
self.map_settings_widgets['Population'].setCurrentText('No')
return
self.calculateCurrentMap()
- def tabChangedMap(self, index):
+ def tabChangedMap(self, index): # noqa: N802, D102
if index == 1:
self.initializeMap()
- def saveCurrentMapByButton(self):
+ def saveCurrentMapByButton(self): # noqa: N802, D102
# if self.current_map_data == None:
- if type(self.current_map) == type(None):
+ if type(self.current_map) == type(None): # noqa: E721
self.errorMSG('REWET', 'No map is plotted')
return
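Map_Designer suppresses E711, E712, and E721 throughout instead of rewriting the comparisons. A small self-contained sketch of the forms those rules expect, using stand-in variables rather than the class attributes:

import pandas as pd

current_map = None
combo_text = 'Yes'
data = pd.Series([1.0, 2.0])

if current_map is None:  # E711: identity check, not `== None`
    pass

iUse_substitute_layer = combo_text == 'Yes'  # collapses the Yes/No blocks
if iUse_substitute_layer:  # E712: truth test, not `== True`
    pass

if isinstance(data, pd.Series):  # E721: isinstance over type() comparison
    data = data.iloc[0]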
diff --git a/modules/systemPerformance/REWET/REWET/GUI/MplWidget.py b/modules/systemPerformance/REWET/REWET/GUI/MplWidget.py
index c5ca1b848..5d1c71e27 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/MplWidget.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/MplWidget.py
@@ -1,7 +1,7 @@
"""Created on Thu Nov 10 18:26:02 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
# Imports
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as Canvas
@@ -12,7 +12,7 @@
from PyQt5 import QtWidgets
-class MplCanvas(Canvas):
+class MplCanvas(Canvas): # noqa: D101
def __init__(self):
self.fig = Figure(figsize=(100, 40), dpi=100, tight_layout=True)
self.ax = self.fig.add_subplot(111)
@@ -24,7 +24,7 @@ def __init__(self):
# Matplotlib widget
-class MplWidget(QtWidgets.QWidget):
+class MplWidget(QtWidgets.QWidget): # noqa: D101
def __init__(self, parent=None):
QtWidgets.QWidget.__init__(self, parent) # Inherit from QWidget
self.canvas = MplCanvas() # Create canvas object
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Discovery_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Discovery_Designer.py
index ed37f0367..d2679d7e7 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Discovery_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Discovery_Designer.py
@@ -1,12 +1,12 @@
"""Created on Tue Nov 1 23:25:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from .Damage_Discovery_Designer import Damage_Discovery_Designer
-class Node_Damage_Discovery_Designer(Damage_Discovery_Designer):
+class Node_Damage_Discovery_Designer(Damage_Discovery_Designer): # noqa: D101
def __init__(self, node_damage_discovery_model):
super().__init__(node_damage_discovery_model)
self._window.setWindowTitle('Node Damage Discovery')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Designer.py
index 3bab4d169..4cae2f921 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Designer.py
@@ -1,7 +1,7 @@
"""Created on Tue Nov 1 20:36:29 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from PyQt5 import QtGui, QtWidgets
@@ -9,7 +9,7 @@
from .Node_Damage_Model_Window import Ui_Node_Damage_Model
-class Node_Damage_Model_Designer(Ui_Node_Damage_Model):
+class Node_Damage_Model_Designer(Ui_Node_Damage_Model): # noqa: D101
def __init__(self, node_damage_model):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
@@ -170,11 +170,11 @@ def __init__(self, node_damage_model):
self.buttonBox.accepted.connect(self.okButtonPressed)
self.help_button.clicked.connect(self.showHelpByButton)
- def showHelpByButton(self):
+ def showHelpByButton(self): # noqa: N802, D102
help_dialog_box = Node_Damage_Model_Help_Designer()
- help_dialog_box._window.exec_()
+ help_dialog_box._window.exec_() # noqa: SLF001
- def okButtonPressed(self):
+ def okButtonPressed(self): # noqa: C901, N802, D102
a = self.a_line.text()
b = self.b_line.text()
c = self.c_line.text()
@@ -260,12 +260,12 @@ def okButtonPressed(self):
self._window.accept()
- def errorMSG(self, error_title, error_msg, error_more_msg=None):
+ def errorMSG(self, error_title, error_msg, error_more_msg=None): # noqa: N802, D102
error_widget = QtWidgets.QMessageBox()
error_widget.setIcon(QtWidgets.QMessageBox.Critical)
error_widget.setText(error_msg)
error_widget.setWindowTitle(error_title)
error_widget.setStandardButtons(QtWidgets.QMessageBox.Ok)
- if error_more_msg != None:
+ if error_more_msg != None: # noqa: E711
error_widget.setInformativeText(error_more_msg)
error_widget.exec_()
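The same errorMSG body recurs in this file, in Opening_Designer.py, and in PP_Data_Tab_Designer.py below, each copy carrying its own E711 suppression. A sketch of a shared helper, assuming a QApplication is already running when it is called:

from PyQt5 import QtWidgets


def show_error(error_title, error_msg, error_more_msg=None):
    # One shared implementation instead of three identical methods
    error_widget = QtWidgets.QMessageBox()
    error_widget.setIcon(QtWidgets.QMessageBox.Critical)
    error_widget.setText(error_msg)
    error_widget.setWindowTitle(error_title)
    error_widget.setStandardButtons(QtWidgets.QMessageBox.Ok)
    if error_more_msg is not None:  # E711: identity check against None
        error_widget.setInformativeText(error_more_msg)
    error_widget.exec_()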
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Designer.py
index c965d60ae..f50f8c3aa 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Designer.py
@@ -1,14 +1,14 @@
"""Created on Tue Nov 1 21:35:02 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from PyQt5 import QtWidgets
from .Node_Damage_Model_Help_Window import Ui_Node_Damage_Model_Help
-class Node_Damage_Model_Help_Designer(Ui_Node_Damage_Model_Help):
+class Node_Damage_Model_Help_Designer(Ui_Node_Damage_Model_Help): # noqa: D101
def __init__(self):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Window.py
index a901e316b..399534c4f 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Help_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Node_Damage_Model_Help_Window.ui'
+# Form implementation generated from reading ui file 'Node_Damage_Model_Help_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtWidgets
-class Ui_Node_Damage_Model_Help:
- def setupUi(self, Node_Damage_Model_Help):
+class Ui_Node_Damage_Model_Help: # noqa: D101
+ def setupUi(self, Node_Damage_Model_Help): # noqa: N802, N803, D102
Node_Damage_Model_Help.setObjectName('Node_Damage_Model_Help')
Node_Damage_Model_Help.resize(340, 130)
Node_Damage_Model_Help.setMinimumSize(QtCore.QSize(340, 130))
@@ -31,7 +31,7 @@ def setupUi(self, Node_Damage_Model_Help):
self.verticalLayout_2.addWidget(self.label_3)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
- spacerItem = QtWidgets.QSpacerItem(
+ spacerItem = QtWidgets.QSpacerItem( # noqa: N806
40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
)
self.horizontalLayout_2.addItem(spacerItem)
@@ -45,7 +45,7 @@ def setupUi(self, Node_Damage_Model_Help):
self.retranslateUi(Node_Damage_Model_Help)
QtCore.QMetaObject.connectSlotsByName(Node_Damage_Model_Help)
- def retranslateUi(self, Node_Damage_Model_Help):
+ def retranslateUi(self, Node_Damage_Model_Help): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Node_Damage_Model_Help.setWindowTitle(
_translate('Node_Damage_Model_Help', 'Help')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Window.py
index 72efc9edb..d1523836d 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Node_Damage_Model_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Node_Damage_Model_Window.ui'
+# Form implementation generated from reading ui file 'Node_Damage_Model_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtGui, QtWidgets
-class Ui_Node_Damage_Model:
- def setupUi(self, Node_Damage_Model):
+class Ui_Node_Damage_Model: # noqa: D101
+ def setupUi(self, Node_Damage_Model): # noqa: N802, N803, D102, PLR0915
Node_Damage_Model.setObjectName('Node_Damage_Model')
Node_Damage_Model.resize(396, 296)
palette = QtGui.QPalette()
@@ -185,7 +185,7 @@ def setupUi(self, Node_Damage_Model):
self.buttonBox.rejected.connect(Node_Damage_Model.reject)
QtCore.QMetaObject.connectSlotsByName(Node_Damage_Model)
- def retranslateUi(self, Node_Damage_Model):
+ def retranslateUi(self, Node_Damage_Model): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Node_Damage_Model.setWindowTitle(
_translate('Node_Damage_Model', 'Node Damage Model')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Opening_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Opening_Designer.py
index 5bcbbc8d2..6e80ebe39 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Opening_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Opening_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Oct 27 18:06:01 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
import pickle
@@ -24,7 +24,7 @@
from .Simulation_Tab_Designer import Simulation_Tab_Designer
-class Opening_Designer(
+class Opening_Designer( # noqa: D101
Ui_Opening_Window,
Simulation_Tab_Designer,
Hydraulic_Tab_Designer,
@@ -40,7 +40,7 @@ def __init__(self):
self.scenario_list = None
self.settings = Settings()
self.settings.initializeScenarioSettings(None)
- self.current_project_directory = os.getcwd()
+ self.current_project_directory = os.getcwd() # noqa: PTH109
self.project_file_addr = None
self.asli_app = QtWidgets.QApplication([])
@@ -76,21 +76,21 @@ def __init__(self):
"""
self.asli_MainWindow.closeEvent = self.exitApp
- def run(self):
+ def run(self): # noqa: D102
self.asli_MainWindow.show()
sys.exit(self.asli_app.exec_())
- def errorMSG(self, error_title, error_msg, error_more_msg=None):
+ def errorMSG(self, error_title, error_msg, error_more_msg=None): # noqa: N802, D102
error_widget = QtWidgets.QMessageBox()
error_widget.setIcon(QtWidgets.QMessageBox.Critical)
error_widget.setText(error_msg)
error_widget.setWindowTitle(error_title)
error_widget.setStandardButtons(QtWidgets.QMessageBox.Ok)
- if error_more_msg != None:
+ if error_more_msg != None: # noqa: E711
error_widget.setInformativeText(error_more_msg)
error_widget.exec_()
- def questionPrompt(self, title, msg, more_msg=None):
+ def questionPrompt(self, title, msg, more_msg=None): # noqa: N802, D102
prompt_widget = QtWidgets.QMessageBox()
prompt_widget.setIcon(QtWidgets.QMessageBox.Question)
prompt_widget.setText(msg)
@@ -100,11 +100,11 @@ def questionPrompt(self, title, msg, more_msg=None):
| QtWidgets.QMessageBox.No
| QtWidgets.QMessageBox.Cancel
)
- if more_msg != None:
+ if more_msg != None: # noqa: E711
prompt_widget.setInformativeText(more_msg)
return prompt_widget.exec_()
- def openProject(self):
+ def openProject(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getOpenFileName(
self.asli_MainWindow,
'Select project file',
@@ -117,8 +117,8 @@ def openProject(self):
self.current_project_directory = split_addr
self.project_file_addr = file[0]
- with open(file[0], 'rb') as f:
- project = pickle.load(f)
+ with open(file[0], 'rb') as f: # noqa: PTH123
+ project = pickle.load(f) # noqa: S301
self.project = project
# sina put a possible check of result version here
self.setSimulationSettings(project.project_settings)
@@ -130,7 +130,7 @@ def openProject(self):
self.setDamageUI()
self.setRestorationUI()
- def saveProject(self, save_as=False):
+ def saveProject(self, save_as=False): # noqa: FBT002, N802, D102
data_retrived = False
if self.getSimulationSettings():
if self.getHydraulicSettings():
@@ -138,11 +138,11 @@ def saveProject(self, save_as=False):
if self.getRestorationSettings():
data_retrived = True
- if data_retrived == False:
+ if data_retrived == False: # noqa: E712
return False
- if save_as == False:
- if self.project_file_addr == None:
+ if save_as == False: # noqa: E712
+ if self.project_file_addr == None: # noqa: E711
file_addr = QtWidgets.QFileDialog.getSaveFileName(
self.asli_MainWindow,
'Save project file',
@@ -157,14 +157,14 @@ def saveProject(self, save_as=False):
project = Project(self.settings, self.scenario_list)
self.project = project
- with open(self.project_file_addr, 'wb') as f:
+ with open(self.project_file_addr, 'wb') as f: # noqa: PTH123
pickle.dump(project, f)
return True
- def saveProjectAs(self):
+ def saveProjectAs(self): # noqa: N802, D102
if_saved = self.saveProject(save_as=True)
- if if_saved == False:
+ if if_saved == False: # noqa: E712
return
file_addr = QtWidgets.QFileDialog.getSaveFileName(
@@ -181,27 +181,27 @@ def saveProjectAs(self):
project = Project(self.settings, self.scenario_list)
self.project = project
- with open(self.project_file_addr, 'wb') as f:
+ with open(self.project_file_addr, 'wb') as f: # noqa: PTH123
pickle.dump(project, f)
- def showHelpWindow(self):
+ def showHelpWindow(self): # noqa: N802, D102
help_window = Main_Help_Designer()
- help_window._window.exec_()
+ help_window._window.exec_() # noqa: SLF001
- def exitApp(self, event):
+ def exitApp(self, event): # noqa: N802, D102
return_value = self.questionPrompt(
'REWET', 'Do you want to save the project before you leave?'
)
- if return_value == 16384: # Yes
+ if return_value == 16384: # Yes # noqa: PLR2004
if_saved = self.saveProject()
if if_saved:
event.accept()
else:
event.ignore()
- elif return_value == 65536: # None
+ elif return_value == 65536: # No # noqa: PLR2004
event.accept()
- elif return_value == 4194304: # Cancel
+ elif return_value == 4194304: # Cancel # noqa: PLR2004
event.ignore()
return
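exitApp compares against the raw integers 16384, 65536, and 4194304 and suppresses PLR2004; Qt already names these values (65536 is the No button, hence the comment fix above), so the branch can use the enum members directly. A sketch with a hypothetical helper:

from PyQt5 import QtWidgets


def handle_exit_choice(return_value, event, save_project):
    # PLR2004: use the named StandardButton values, not magic numbers
    if return_value == QtWidgets.QMessageBox.Yes:  # was 16384
        if save_project():
            event.accept()
        else:
            event.ignore()
    elif return_value == QtWidgets.QMessageBox.No:  # was 65536
        event.accept()
    elif return_value == QtWidgets.QMessageBox.Cancel:  # was 4194304
        event.ignore()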
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Opening_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Opening_Window.py
index ecbaa49c3..b7eaf7ff3 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Opening_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Opening_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Opening.ui'
+# Form implementation generated from reading ui file 'Opening.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtGui, QtWidgets
-class Ui_Opening_Window:
- def setupUi(self, Opening_Window):
+class Ui_Opening_Window: # noqa: D101
+ def setupUi(self, Opening_Window): # noqa: N802, N803, D102, PLR0915
Opening_Window.setObjectName('Opening_Window')
Opening_Window.resize(830, 780)
self.centralwidget = QtWidgets.QWidget(Opening_Window)
@@ -892,7 +892,7 @@ def setupUi(self, Opening_Window):
self.results_tabs_widget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Opening_Window)
- def retranslateUi(self, Opening_Window):
+ def retranslateUi(self, Opening_Window): # noqa: N802, N803, D102, PLR0915
_translate = QtCore.QCoreApplication.translate
Opening_Window.setWindowTitle(_translate('Opening_Window', 'REWET'))
self.groupBox_4.setTitle(_translate('Opening_Window', 'Temp File Settings'))
@@ -1102,7 +1102,7 @@ def retranslateUi(self, Opening_Window):
item.setText(_translate('Opening_Window', 'Time Shift'))
item = self.curve_settings_table.horizontalHeaderItem(0)
item.setText(_translate('Opening_Window', 'Values'))
- __sortingEnabled = self.curve_settings_table.isSortingEnabled()
+ __sortingEnabled = self.curve_settings_table.isSortingEnabled() # noqa: N806
self.curve_settings_table.setSortingEnabled(False)
self.curve_settings_table.setSortingEnabled(__sortingEnabled)
self.label_25.setText(_translate('Opening_Window', 'Scenario'))
@@ -1123,7 +1123,7 @@ def retranslateUi(self, Opening_Window):
item.setText(_translate('Opening_Window', 'Time Shift'))
item = self.map_settings_table.horizontalHeaderItem(0)
item.setText(_translate('Opening_Window', 'Values'))
- __sortingEnabled = self.map_settings_table.isSortingEnabled()
+ __sortingEnabled = self.map_settings_table.isSortingEnabled() # noqa: N806
self.map_settings_table.setSortingEnabled(False)
self.map_settings_table.setSortingEnabled(__sortingEnabled)
self.label_36.setText(_translate('Opening_Window', 'Settings'))
@@ -1174,7 +1174,7 @@ def retranslateUi(self, Opening_Window):
)
-from .MplWidget import MplWidget
+from .MplWidget import MplWidget # noqa: E402
if __name__ == '__main__':
import sys
diff --git a/modules/systemPerformance/REWET/REWET/GUI/PP_Data_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/PP_Data_Tab_Designer.py
index b0a7e8a71..c438176ec 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/PP_Data_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/PP_Data_Tab_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Dec 29 15:41:03 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
@@ -10,7 +10,7 @@
from Result_Project import Project_Result
-class PP_Data_Tab:
+class PP_Data_Tab: # noqa: D101
def __init__(self, project):
self.pp_project = project
# self.__settings = settings
@@ -24,8 +24,8 @@ def __init__(self, project):
self.project_result = None
self.current_population_directory = ''
- def initalizeResultData(self):
- if self.project == None:
+ def initalizeResultData(self): # noqa: N802, D102
+ if self.project == None: # noqa: E711
self.errorMSG(
'Error', 'No project is found. Open or save a new project.'
)
@@ -39,8 +39,8 @@ def initalizeResultData(self):
)
self.clearResultData()
- print(self.project_result.scn_name_list_that_result_file_not_found)
- for index, row in self.scenario_list.iterrows():
+ print(self.project_result.scn_name_list_that_result_file_not_found) # noqa: T201
+ for index, row in self.scenario_list.iterrows(): # noqa: B007
number_of_rows = self.result_file_status_table.rowCount()
scenario_name = row['Scenario Name']
scenario_item = QtWidgets.QTableWidgetItem(scenario_name)
@@ -61,19 +61,19 @@ def initalizeResultData(self):
for scenario_name in self.result_scenarios:
try:
self.project_result.loadScneariodata(scenario_name)
- except Exception:
+ except Exception: # noqa: BLE001, PERF203
self.errorMSG('Error', 'Error occurred in reading data')
self.clearResultData()
- raise Exception
+ raise Exception # noqa: B904, TRY002
return
- self.results_tabs_widget.setTabEnabled(1, True)
+ self.results_tabs_widget.setTabEnabled(1, True) # noqa: FBT003
- def clearResultData(self):
- for i in range(self.result_file_status_table.rowCount()):
+ def clearResultData(self): # noqa: N802, D102
+ for i in range(self.result_file_status_table.rowCount()): # noqa: B007
self.result_file_status_table.removeRow(0)
- def resultLoadButtonPressed(self):
+ def resultLoadButtonPressed(self): # noqa: N802, D102
# data_retrived = False
# if self.getSimulationSettings():
# if self.getHydraulicSettings():
@@ -86,7 +86,7 @@ def resultLoadButtonPressed(self):
self.initalizeResultData()
- def browsePopulationData(self):
+ def browsePopulationData(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getOpenFileName(
self.asli_MainWindow,
'Open file',
@@ -100,7 +100,7 @@ def browsePopulationData(self):
self.population_addr_line.setText(file[0])
- print(file)
+ print(file) # noqa: T201
if file[1] == 'Excel file (*.xlsx)':
self.population_data = pd.read_excel(file[0])
elif file[1] == 'CSV File (*.csv)':
@@ -117,10 +117,10 @@ def browsePopulationData(self):
self.population_data.columns.to_list()
)
- if len(self.population_data.columns.to_list()) >= 2:
+ if len(self.population_data.columns.to_list()) >= 2: # noqa: PLR2004
self.population_population_header_combo.setCurrentIndex(1)
- def loadPopulationData(self):
+ def loadPopulationData(self): # noqa: N802, D102
node_id_header = self.population_node_ID_header_combo.currentText()
population_header = self.population_population_header_combo.currentText()
@@ -137,7 +137,7 @@ def loadPopulationData(self):
)
return
- if self.project_result == None:
+ if self.project_result == None: # noqa: E711
self.errorMSG(
'Error', 'No project and data is loaded. Please load the data first.'
)
@@ -147,12 +147,12 @@ def loadPopulationData(self):
self.population_data, node_id_header, population_header
)
- def errorMSG(self, error_title, error_msg, error_more_msg=None):
+ def errorMSG(self, error_title, error_msg, error_more_msg=None): # noqa: N802, D102
error_widget = QtWidgets.QMessageBox()
error_widget.setIcon(QtWidgets.QMessageBox.Critical)
error_widget.setText(error_msg)
error_widget.setWindowTitle(error_title)
error_widget.setStandardButtons(QtWidgets.QMessageBox.Ok)
- if error_more_msg != None:
+ if error_more_msg != None: # noqa: E711
error_widget.setInformativeText(error_more_msg)
error_widget.exec_()
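The scenario loader above catches bare Exception and re-raises a bare Exception, suppressing BLE001, B904, and TRY002; those rules point at catching something specific and chaining the cause. A sketch with a hypothetical wrapper; OSError is an assumption about what loadScneariodata can raise:

def load_all_scenarios_ref(project_result, result_scenarios, report_error):
    for scenario_name in result_scenarios:
        try:
            project_result.loadScneariodata(scenario_name)
        except OSError as exc:  # BLE001: catch what loading actually raises
            report_error('Error', 'Error occurred in reading data')
            # B904/TRY002: chain the cause into a specific exception type
            raise RuntimeError('failed to load ' + scenario_name) from exc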
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Designer.py
index 2ee5860a1..7e29f3fa3 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Designer.py
@@ -1,12 +1,12 @@
"""Created on Tue Nov 1 23:25:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from .Damage_Discovery_Designer import Damage_Discovery_Designer
-class Pipe_Damage_Discovery_Designer(Damage_Discovery_Designer):
+class Pipe_Damage_Discovery_Designer(Damage_Discovery_Designer): # noqa: D101
def __init__(self, pipe_damage_discovery_model):
super().__init__(pipe_damage_discovery_model)
self._window.setWindowTitle('Pipe Damage Discovery')
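Every call site in this patch reaches into designer._window.exec_() and suppresses SLF001; a small public method on the designer side would remove the private access at each caller. A stand-in sketch, not the actual Damage_Discovery_Designer API:

class DialogDesignerBase:
    """Hypothetical base for the *_Designer classes that wrap a QDialog."""

    def __init__(self, window):
        self._window = window

    def exec_(self):
        # SLF001: callers run designer.exec_() instead of designer._window.exec_()
        return self._window.exec_()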
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Window.py
index fcc9c19fc..16b9a3bf8 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Discovery_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Pipe_Damage_Discovery_Window.ui'
+# Form implementation generated from reading ui file 'Pipe_Damage_Discovery_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtWidgets
-class Ui_pipe_damage_discovery:
- def setupUi(self, pipe_damage_discovery):
+class Ui_pipe_damage_discovery: # noqa: D101
+ def setupUi(self, pipe_damage_discovery): # noqa: N802, D102
pipe_damage_discovery.setObjectName('pipe_damage_discovery')
pipe_damage_discovery.resize(450, 400)
pipe_damage_discovery.setMinimumSize(QtCore.QSize(450, 400))
@@ -79,7 +79,7 @@ def setupUi(self, pipe_damage_discovery):
self.buttonBox.rejected.connect(pipe_damage_discovery.reject)
QtCore.QMetaObject.connectSlotsByName(pipe_damage_discovery)
- def retranslateUi(self, pipe_damage_discovery):
+ def retranslateUi(self, pipe_damage_discovery): # noqa: N802, D102
_translate = QtCore.QCoreApplication.translate
pipe_damage_discovery.setWindowTitle(
_translate('pipe_damage_discovery', 'Pipe Damage Discovery')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Designer.py
index 177720ee1..01b6ff657 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Designer.py
@@ -1,14 +1,14 @@
"""Created on Tue Nov 1 18:32:32 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from PyQt5 import QtGui, QtWidgets
from .Pipe_Damage_Model_Window import Ui_Pipe_Damage_Model
-class Pipe_Damage_Model_Designer(Ui_Pipe_Damage_Model):
+class Pipe_Damage_Model_Designer(Ui_Pipe_Damage_Model): # noqa: D101
def __init__(self, pipe_damage_model):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
@@ -59,8 +59,8 @@ def __init__(self, pipe_damage_model):
self.material_list.currentItemChanged.connect(self.materialChanged)
- def materialChanged(self, current_item, previous_item):
- if previous_item != None:
+ def materialChanged(self, current_item, previous_item): # noqa: N802, D102
+ if previous_item != None: # noqa: E711
previous_material = previous_item.text()
alpha = self.alpha_line.text()
@@ -88,7 +88,7 @@ def materialChanged(self, current_item, previous_item):
self.a_line.setText(str(a))
self.b_line.setText(str(b))
- def okButtonPressed(self):
+ def okButtonPressed(self): # noqa: N802, D102
current_material = self.material_list.selectedItems()[0].text()
alpha = self.alpha_line.text()
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Window.py
index cb548cf1d..5f6253e95 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Pipe_Damage_Model_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Pipe_Damage_Model_Window.ui'
+# Form implementation generated from reading ui file 'Pipe_Damage_Model_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtGui, QtWidgets
-class Ui_Pipe_Damage_Model:
- def setupUi(self, Pipe_Damage_Model):
+class Ui_Pipe_Damage_Model: # noqa: D101
+ def setupUi(self, Pipe_Damage_Model): # noqa: N802, N803, D102
Pipe_Damage_Model.setObjectName('Pipe_Damage_Model')
Pipe_Damage_Model.resize(377, 372)
self.buttonBox = QtWidgets.QDialogButtonBox(Pipe_Damage_Model)
@@ -100,7 +100,7 @@ def setupUi(self, Pipe_Damage_Model):
self.buttonBox.rejected.connect(Pipe_Damage_Model.reject)
QtCore.QMetaObject.connectSlotsByName(Pipe_Damage_Model)
- def retranslateUi(self, Pipe_Damage_Model):
+ def retranslateUi(self, Pipe_Damage_Model): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Pipe_Damage_Model.setWindowTitle(
_translate('Pipe_Damage_Model', 'Pipe Damage Model')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Pump_Damage_Discovery_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Pump_Damage_Discovery_Designer.py
index 9c77e780b..e63358d0a 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Pump_Damage_Discovery_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Pump_Damage_Discovery_Designer.py
@@ -1,12 +1,12 @@
"""Created on Tue Nov 1 23:25:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from .Damage_Discovery_Designer import Damage_Discovery_Designer
-class Pump_Damage_Discovery_Designer(Damage_Discovery_Designer):
+class Pump_Damage_Discovery_Designer(Damage_Discovery_Designer): # noqa: D101
def __init__(self, pump_damage_discovery_model):
super().__init__(pump_damage_discovery_model)
self._window.setWindowTitle('Pump Damage Discovery')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/REWET_Resource_rc.py b/modules/systemPerformance/REWET/REWET/GUI/REWET_Resource_rc.py
index 28480a571..11273d7ec 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/REWET_Resource_rc.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/REWET_Resource_rc.py
@@ -1,4 +1,4 @@
-# Resource object code
+# Resource object code # noqa: N999, D100
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.9)
#
@@ -6948,13 +6948,13 @@
qt_resource_struct = qt_resource_struct_v2
-def qInitResources():
+def qInitResources(): # noqa: N802, D103
QtCore.qRegisterResourceData(
rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
)
-def qCleanupResources():
+def qCleanupResources(): # noqa: N802, D103
QtCore.qUnregisterResourceData(
rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
)
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Restoration_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Restoration_Tab_Designer.py
index b55c6af5d..b4bae11b5 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Restoration_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Restoration_Tab_Designer.py
@@ -1,7 +1,7 @@
"""Created on Wed Nov 2 00:24:43 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
@@ -13,15 +13,15 @@
from .Tank_Damage_Discovery_Designer import Tank_Damage_Discovery_Designer
-class Restoration_Tab_Designer:
+class Restoration_Tab_Designer: # noqa: D101
def __init__(self):
- """These are variables that are shared between ui and settings."""
+ """These are variables that are shared between ui and settings.""" # noqa: D401
self.setRestorationSettings(self.settings)
"""
Reassignment of shared variables.
"""
- self.current_policy_directory = os.getcwd()
+ self.current_policy_directory = os.getcwd() # noqa: PTH109
"""
ui value assignments.
@@ -50,14 +50,14 @@ def __init__(self):
self.tankDamageDiscoveryByButton
)
- def getRestorationSettings(self):
+ def getRestorationSettings(self): # noqa: N802, D102
if self.restoration_on_radio.isChecked():
self.restoration_on = True
elif self.restoration_off_radio.isChecked():
self.restoration_on = False
else:
- raise ValueError(
- 'None of Restoration-on/off buttons are checked which is an error.'
+ raise ValueError( # noqa: TRY003
+ 'None of Restoration-on/off buttons are checked which is an error.' # noqa: EM101
)
if self.script_txt_radio.isChecked():
@@ -65,8 +65,8 @@ def getRestorationSettings(self):
elif self.script_rrp_radio.isChecked():
self.restoraion_policy_type = 'binary'
else:
- raise ValueError(
- 'None of File-Type buttons are checked which is an error.'
+ raise ValueError( # noqa: TRY003
+ 'None of File-Type buttons are checked which is an error.' # noqa: EM101
)
self.minimum_job_time = int(float(self.minimum_job_time_line.text()))
@@ -99,10 +99,10 @@ def getRestorationSettings(self):
return True
- def setRestorationUI(self):
- if self.restoration_on == True:
+ def setRestorationUI(self): # noqa: N802, D102
+ if self.restoration_on == True: # noqa: E712
self.restoration_on_radio.setChecked(True)
- elif self.restoration_on == False:
+ elif self.restoration_on == False: # noqa: E712
self.restoration_off_radio.setChecked(True)
else:
raise ValueError(
@@ -124,9 +124,9 @@ def setRestorationUI(self):
self.minimum_job_time_line.setText(str(self.minimum_job_time))
- if self.out_of_zone_allowed == True:
+ if self.out_of_zone_allowed == True: # noqa: E712
self.out_of_zone_travel_yes.setChecked(True)
- elif self.out_of_zone_allowed == False:
+ elif self.out_of_zone_allowed == False: # noqa: E712
self.out_of_zone_travel_no.setChecked(True)
else:
raise ValueError(
@@ -134,7 +134,7 @@ def setRestorationUI(self):
+ repr(self.out_of_zone_travel_no)
)
- def setRestorationSettings(self, settings):
+ def setRestorationSettings(self, settings): # noqa: N802, D102
self.restoration_on = settings.process['Restoration_on']
self.restoraion_policy_type = settings.scenario['Restoraion_policy_type']
self.restoraion_policy_addr = settings.scenario['Restortion_config_file']
@@ -153,7 +153,7 @@ def setRestorationSettings(self, settings):
]
self.out_of_zone_allowed = settings.scenario['crew_out_of_zone_travel']
- def browsePolicyDefinitionFile(self):
+ def browsePolicyDefinitionFile(self): # noqa: N802, D102
if self.script_txt_radio.isChecked():
file_type = 'scenrario text file (*.txt)'
elif self.script_rrp_radio.isChecked():
@@ -172,44 +172,44 @@ def browsePolicyDefinitionFile(self):
self.restoraion_policy_addr = file[0]
self.policy_definition_addr_line.setText(file[0])
- def pipeDamageDiscoveryByButton(self):
+ def pipeDamageDiscoveryByButton(self): # noqa: N802, D102
pipe_damage_discovery_designer = Pipe_Damage_Discovery_Designer(
self.pipe_damage_discovery_model
)
- return_value = pipe_damage_discovery_designer._window.exec_()
+ return_value = pipe_damage_discovery_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.pipe_damage_discovery_model = (
pipe_damage_discovery_designer.damage_discovery_model
)
- def nodeDamageDiscoveryByButton(self):
+ def nodeDamageDiscoveryByButton(self): # noqa: N802, D102
node_damage_discovery_designer = Node_Damage_Discovery_Designer(
self.node_damage_discovery_model
)
- return_value = node_damage_discovery_designer._window.exec_()
+ return_value = node_damage_discovery_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.node_damage_discovery_model = (
node_damage_discovery_designer.damage_discovery_model
)
- def pumpDamageDiscoveryByButton(self):
+ def pumpDamageDiscoveryByButton(self): # noqa: N802, D102
pump_damage_discovery_designer = Pump_Damage_Discovery_Designer(
self.pump_damage_discovery_model
)
- return_value = pump_damage_discovery_designer._window.exec_()
+ return_value = pump_damage_discovery_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.pump_damage_discovery_model = (
pump_damage_discovery_designer.damage_discovery_model
)
- def tankDamageDiscoveryByButton(self):
+ def tankDamageDiscoveryByButton(self): # noqa: N802, D102
tank_damage_discovery_designer = Tank_Damage_Discovery_Designer(
self.tank_damage_discovery_model
)
- return_value = tank_damage_discovery_designer._window.exec_()
+ return_value = tank_damage_discovery_designer._window.exec_() # noqa: SLF001
if return_value == 1:
self.tank_damage_discovery_model = (
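TRY003 and EM101 in the hunks above flag long literal messages built inside raise; the fix they suggest is binding the message first (or defining a dedicated exception class). A minimal sketch of the radio-button check in that style:

def get_restoration_flag(on_checked, off_checked):
    if on_checked:
        return True
    if off_checked:
        return False
    # TRY003/EM101: bind the message instead of a long literal in the raise
    msg = 'None of Restoration-on/off buttons are checked which is an error.'
    raise ValueError(msg)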
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Result_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Result_Designer.py
index 65d408a33..ef734eb67 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Result_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Result_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Nov 10 18:29:50 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import pandas as pd
from PyQt5 import QtGui, QtWidgets
@@ -72,16 +72,16 @@
}
-class Time_Unit_Combo(QtWidgets.QComboBox):
+class Time_Unit_Combo(QtWidgets.QComboBox): # noqa: D101
def __init__(self):
super().__init__()
time_units = ['second', 'hour', 'day']
self.addItems(time_units)
- def changeCurveTimeUnit(self, raw_time_curve):
+ def changeCurveTimeUnit(self, raw_time_curve): # noqa: N802, D102
res = {}
- if type(raw_time_curve) == pd.core.series.Series:
+ if type(raw_time_curve) == pd.core.series.Series: # noqa: E721
time_justified_curve = raw_time_curve.copy()
res = self.applyUnitToSeries(time_justified_curve)
else:
@@ -90,7 +90,7 @@ def changeCurveTimeUnit(self, raw_time_curve):
res[k] = self.applyUnitToSeries(time_justified_curve)
return res
- def applyUnitToSeries(self, data):
+ def applyUnitToSeries(self, data): # noqa: N802, D102
time_unit = self.currentText()
if time_unit == 'second':
pass
@@ -104,13 +104,13 @@ def applyUnitToSeries(self, data):
return data
-class Yes_No_Combo(QtWidgets.QComboBox):
+class Yes_No_Combo(QtWidgets.QComboBox): # noqa: D101
def __init__(self):
super().__init__()
self.addItems(['No', 'Yes'])
-class Result_Designer:
+class Result_Designer: # noqa: D101
def __init__(self):
self.current_raw_curve = None
self.current_curve = None
@@ -125,23 +125,23 @@ def __init__(self):
self.initalize_result()
- def initalize_result(self):
+ def initalize_result(self): # noqa: D102
self.setCurveAllScenarios(True)
self.all_scenarios_checkbox.setChecked(True)
self.scenario_combo.clear()
self.scenario_combo.addItems(self.result_scenarios)
# self.current_curve_data = None
- def curveAllScenarioCheckboxChanged(self, state):
+ def curveAllScenarioCheckboxChanged(self, state): # noqa: N802, D102
if state == 0:
self.setCurveAllScenarios(False)
- elif state == 2:
+ elif state == 2: # noqa: PLR2004
self.setCurveAllScenarios(True)
- def clearCurvePlot(self):
+ def clearCurvePlot(self): # noqa: N802, D102
self.mpl_curve.canvas.ax.cla()
- def plot_data(self):
+ def plot_data(self): # noqa: D102
x = range(10)
y = range(0, 20, 2)
self.mpl_curve.canvas.ax.plot(x, y)
@@ -150,16 +150,16 @@ def plot_data(self):
# self.mpl_curve.canvas.ax.set_xlabel("x_label")
# self.mpl_curve.canvas.fig.tight_layout()
- def plotCurve(self, y_label=None, x_label=None):
- if y_label == None:
+ def plotCurve(self, y_label=None, x_label=None): # noqa: N802, D102
+ if y_label == None: # noqa: E711
y_label = self.mpl_curve.canvas.ax.get_ylabel()
- if x_label == None:
+ if x_label == None: # noqa: E711
x_label = self.mpl_curve.canvas.ax.get_xlabel()
self.mpl_curve.canvas.ax.clear()
data = self.current_curve
- if type(data) == pd.core.series.Series:
+ if type(data) == pd.core.series.Series: # noqa: E721
self.mpl_curve.canvas.ax.plot(
self.current_curve.index, self.current_curve.to_list()
)
@@ -172,14 +172,14 @@ def plotCurve(self, y_label=None, x_label=None):
self.mpl_curve.canvas.draw()
self.mpl_curve.canvas.fig.tight_layout()
- def setCurveAllScenarios(self, flag):
- if flag == True:
+ def setCurveAllScenarios(self, flag): # noqa: N802, D102
+ if flag == True: # noqa: E712
self.all_scenarios_checkbox.setChecked(True)
self.scenario_combo.setEnabled(False)
self.curve_type_combo.clear()
self.curve_type_combo.addItems(multi_scenario_curve_options)
self.clearCurvePlot()
- elif flag == False:
+ elif flag == False: # noqa: E712
self.all_scenarios_checkbox.setChecked(False)
self.scenario_combo.setEnabled(True)
self.curve_type_combo.clear()
@@ -188,23 +188,23 @@ def setCurveAllScenarios(self, flag):
else:
raise ValueError('Unknown flag: ' + repr(flag))
- def resultScenarioChanged(self, text):
+ def resultScenarioChanged(self, text): # noqa: N802, D102
self.result_current_scenario = text # self.scenario_combo.getText()
# self.current_curve_data = None
- def curveTypeChanegd(self, text):
- if self.project_result == None:
+ def curveTypeChanegd(self, text): # noqa: N802, D102
+ if self.project_result == None: # noqa: E711
return
self.current_curve_type = text
self.setCurveSettingBox(text)
self.calculateCurrentCurve()
- def calculateCurrentCurve(self):
+ def calculateCurrentCurve(self): # noqa: C901, N802, D102
curve_type = self.current_curve_type
if curve_type == 'Quantity Exceedance':
- iPopulation = self.curve_settings_widgets['Population'].currentText()
- iRatio = self.curve_settings_widgets['Percentage'].currentText()
- iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText()
+ iPopulation = self.curve_settings_widgets['Population'].currentText() # noqa: N806
+ iRatio = self.curve_settings_widgets['Percentage'].currentText() # noqa: N806
+ iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.curve_settings_widgets['leak Criteria'].text()
group_method = self.curve_settings_widgets['Group method'].currentText()
daily_bin = self.curve_settings_widgets['Daily bin'].currentText()
@@ -212,14 +212,14 @@ def calculateCurrentCurve(self):
max_time = self.curve_settings_widgets['Max time'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
if iRatio == 'Yes':
- iRatio = True
+ iRatio = True # noqa: N806
else:
- iRatio = False
+ iRatio = False # noqa: N806
if daily_bin == 'Yes':
daily_bin = True
@@ -246,9 +246,9 @@ def calculateCurrentCurve(self):
self.plotCurve('Exceedance Probability', 'Time')
elif curve_type == 'Delivery Exceedance':
- iPopulation = self.curve_settings_widgets['Population'].currentText()
- iRatio = self.curve_settings_widgets['Percentage'].currentText()
- iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText()
+ iPopulation = self.curve_settings_widgets['Population'].currentText() # noqa: N806
+ iRatio = self.curve_settings_widgets['Percentage'].currentText() # noqa: N806
+ iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.curve_settings_widgets['leak Criteria'].text()
group_method = self.curve_settings_widgets['Group method'].currentText()
daily_bin = self.curve_settings_widgets['Daily bin'].currentText()
@@ -256,14 +256,14 @@ def calculateCurrentCurve(self):
max_time = self.curve_settings_widgets['Max time'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
if iRatio == 'Yes':
- iRatio = True
+ iRatio = True # noqa: N806
else:
- iRatio = False
+ iRatio = False # noqa: N806
if daily_bin == 'Yes':
daily_bin = True
@@ -289,21 +289,21 @@ def calculateCurrentCurve(self):
)
self.plotCurve('Exceedance Probability', 'Time')
elif curve_type == 'Quantity':
- iPopulation = self.curve_settings_widgets['Population'].currentText()
+ iPopulation = self.curve_settings_widgets['Population'].currentText() # noqa: N806
# iPopulation = self.curve_population_settings_combo.currentText()
- iRatio = self.curve_settings_widgets['Percentage'].currentText()
- iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText()
+ iRatio = self.curve_settings_widgets['Percentage'].currentText() # noqa: N806
+ iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.curve_settings_widgets['leak Criteria'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
if iRatio == 'Yes':
- iRatio = True
+ iRatio = True # noqa: N806
else:
- iRatio = False
+ iRatio = False # noqa: N806
scn_name = self.scenario_combo.currentText()
self.current_raw_curve = self.project_result.getQNIndexPopulation_4(
@@ -320,21 +320,21 @@ def calculateCurrentCurve(self):
elif curve_type == 'Delivery':
# self.current_curve_data = (curve_type, pd.DataFrame())
- iPopulation = self.curve_settings_widgets['Population'].currentText()
+ iPopulation = self.curve_settings_widgets['Population'].currentText() # noqa: N806
# iPopulation = self.curve_population_settings_combo.currentText()
- iRatio = self.curve_settings_widgets['Percentage'].currentText()
- iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText()
+ iRatio = self.curve_settings_widgets['Percentage'].currentText() # noqa: N806
+ iConsider_leak = self.curve_settings_widgets['LDN leak'].currentText() # noqa: N806
leak_ratio = self.curve_settings_widgets['leak Criteria'].text()
if iConsider_leak == 'Yes':
- iConsider_leak = True
+ iConsider_leak = True # noqa: N806
else:
- iConsider_leak = False
+ iConsider_leak = False # noqa: N806
if iRatio == 'Yes':
- iRatio = True
+ iRatio = True # noqa: N806
else:
- iRatio = False
+ iRatio = False # noqa: N806
scn_name = self.scenario_combo.currentText()
self.current_raw_curve = self.project_result.getDLIndexPopulation_4(
@@ -351,7 +351,7 @@ def calculateCurrentCurve(self):
elif curve_type == 'SSI':
# self.current_curve_data = (curve_type, pd.DataFrame())
- iPopulation = self.curve_settings_widgets['Population'].currentText()
+ iPopulation = self.curve_settings_widgets['Population'].currentText() # noqa: N806
scn_name = self.scenario_combo.currentText()
self.current_raw_curve = (
self.project_result.getSystemServiceabilityIndexCurve(
@@ -363,8 +363,8 @@ def calculateCurrentCurve(self):
)
self.plotCurve('SSI', 'Time')
- def setCurveSettingBox(self, curve_type):
- for i in range(self.curve_settings_table.rowCount()):
+ def setCurveSettingBox(self, curve_type): # noqa: N802, D102
+ for i in range(self.curve_settings_table.rowCount()): # noqa: B007
self.curve_settings_table.removeRow(0)
if curve_type in curve_settings:
@@ -373,7 +373,7 @@ def setCurveSettingBox(self, curve_type):
pass
# raise ValueError("Unknown Curve type: "+repr(curve_type))
- def populateCurveSettingsTable(self, settings_content):
+ def populateCurveSettingsTable(self, settings_content): # noqa: C901, N802, D102
self.curve_settings_widgets.clear()
vertical_header = []
cell_type_list = []
@@ -457,35 +457,35 @@ def populateCurveSettingsTable(self, settings_content):
else:
raise ValueError(repr(cell_type))
- i += 1
+ i += 1 # noqa: SIM113
# for label in settings_content:
- def curveTimeSettingsChanged(self, x):
+ def curveTimeSettingsChanged(self, x): # noqa: ARG002, N802, D102
self.current_curve = self.time_combo.changeCurveTimeUnit(
self.current_raw_curve
)
self.plotCurve()
- def curveSettingChanged(self):
+ def curveSettingChanged(self): # noqa: N802, D102
if 'Population' in self.curve_settings_widgets:
new_population_setting = self.curve_settings_widgets[
'Population'
].currentText()
- if new_population_setting == 'Yes' and type(
- self.project_result._population_data
+ if new_population_setting == 'Yes' and type( # noqa: E721
+ self.project_result._population_data # noqa: SLF001
) == type(None):
self.errorMSG('Error', 'Population data is not loaded')
self.curve_settings_widgets['Population'].setCurrentText('No')
return
self.calculateCurrentCurve()
- def tabChanged(self, index):
+ def tabChanged(self, index): # noqa: N802, D102
if index == 1:
self.initalize_result()
- def saveCurrentCurveByButton(self):
+ def saveCurrentCurveByButton(self): # noqa: N802, D102
# if self.current_curve_data == None:
- if type(self.current_curve) == type(None):
+ if type(self.current_curve) == type(None): # noqa: E721
self.errorMSG('REWET', 'No curve is ploted')
return
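Note on the E711/E712/E721 suppressions that recur throughout this file: ruff flags equality tests against singletons and types because identity and isinstance checks are the idiomatic forms. A minimal sketch of the unsuppressed equivalents, using illustrative names rather than the actual REWET attributes:

    import pandas as pd

    def plot_curve(data, y_label=None):
        if y_label is None:              # E711: `is None`, not `== None`
            y_label = 'default'
        if isinstance(data, pd.Series):  # E721: isinstance, not `type(x) == ...`
            data = data.to_frame()
        return data

    def set_all_scenarios(flag):
        if flag:                         # E712: truthiness, not `== True`
            return 'all'
        return 'single'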
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Run_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Run_Tab_Designer.py
index 81d7f9485..f606975a9 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Run_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Run_Tab_Designer.py
@@ -1,7 +1,7 @@
"""Created on Wed Nov 2 14:40:45 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import subprocess
import threading
@@ -9,11 +9,11 @@
from PyQt5.QtCore import QObject, pyqtSignal
-class Custom_Object(QObject):
- outSignal = pyqtSignal(bytes)
+class Custom_Object(QObject): # noqa: D101
+ outSignal = pyqtSignal(bytes) # noqa: N815
-class Run_Tab_Designer:
+class Run_Tab_Designer: # noqa: D101
def __init__(self):
self.run_button.clicked.connect(self.runREWET)
self.stop_button.clicked.connect(self.stopRun)
@@ -22,27 +22,27 @@ def __init__(self):
self.rewet_sub_process = None
self.if_run_in_progress = False
- def runREWET(self):
- if self.if_run_in_progress == True:
+ def runREWET(self): # noqa: N802, D102
+ if self.if_run_in_progress == True: # noqa: E712
return False
if_saved = self.saveProject()
- if if_saved == False:
+ if if_saved == False: # noqa: E712
return False
self.ouput_textedit.clear()
# start = Starter()
- if self.project_file_addr == None:
+ if self.project_file_addr == None: # noqa: E711
self.errorMSG(
'REWET',
'File address is empty. Please report it as a bug to the developer.',
)
self.if_run_in_progress = True
self.setAllTabsEnabled(False)
- threading.Thread(target=self._RunREWETHelper, args=(), daemon=True).start()
+ threading.Thread(target=self._RunREWETHelper, args=(), daemon=True).start() # noqa: RET503
- def _RunREWETHelper(self):
- self.rewet_sub_process = subprocess.Popen(
- ['python', 'initial.py', self.project_file_addr],
+ def _RunREWETHelper(self): # noqa: N802
+ self.rewet_sub_process = subprocess.Popen( # noqa: S603
+ ['python', 'initial.py', self.project_file_addr], # noqa: S607
stdout=subprocess.PIPE,
bufsize=0,
)
@@ -52,7 +52,7 @@ def _RunREWETHelper(self):
self.cobject.outSignal.emit(line)
self.rewet_sub_process.stdout.close()
- def setAllTabsEnabled(self, enabled):
+ def setAllTabsEnabled(self, enabled): # noqa: N802, D102
# self.ouput_textedit.setEnabled(enabled)
self.main_tab.setTabEnabled(1, enabled)
self.main_process1.setTabEnabled(0, enabled)
@@ -64,7 +64,7 @@ def setAllTabsEnabled(self, enabled):
# self.stop_button.setEnabled(True)
# @pyqtSlot(bytes)
- def updateRunOuput(self, string):
+ def updateRunOuput(self, string): # noqa: N802, D102
string = string.decode()
if 'Time of Single run is' in string:
@@ -76,7 +76,7 @@ def updateRunOuput(self, string):
# running code for the project
- def endSimulation(self):
+ def endSimulation(self): # noqa: N802, D102
end_message = (
'\n-------------------\nSIMULATION FINISHED\n-------------------\n'
)
@@ -84,7 +84,7 @@ def endSimulation(self):
self.if_run_in_progress = False
self.ouput_textedit.appendPlainText(end_message)
- def errorInSimulation(self):
+ def errorInSimulation(self): # noqa: N802, D102
end_message = '\n-------------\nERROR OCCURRED\n-------------\n'
self.setAllTabsEnabled(True)
self.if_run_in_progress = False
@@ -94,10 +94,10 @@ def errorInSimulation(self):
)
self.ouput_textedit.appendPlainText(end_message)
- def stopRun(self):
- if self.if_run_in_progress == False:
+ def stopRun(self): # noqa: N802, D102
+ if self.if_run_in_progress == False: # noqa: E712
return
- if type(self.rewet_sub_process) != type(None):
+ if type(self.rewet_sub_process) != type(None): # noqa: E721
self.rewet_sub_process.terminate()
termination_message = '\n-------------\nRUN CANCELLED\n-------------\n'
self.setAllTabsEnabled(True)
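The S603/S607 suppressions in `_RunREWETHelper` mark a `subprocess.Popen` that starts `python` by bare name, so it runs whichever interpreter PATH resolves first. A hedged sketch of the alternative ruff points to, launching the current interpreter via `sys.executable`; the `initial.py` script and project-file argument are taken from the call above:

    import subprocess
    import sys

    def run_rewet(project_file_addr):
        # sys.executable is an absolute path, so S607 (partial executable
        # path) no longer applies; the argument list is program-controlled.
        return subprocess.Popen(  # noqa: S603
            [sys.executable, 'initial.py', project_file_addr],
            stdout=subprocess.PIPE,
            bufsize=0,
        )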
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Designer.py
index 895f0bd02..2ee3f9090 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Designer.py
@@ -1,14 +1,14 @@
"""Created on Fri Oct 28 14:09:49 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from PyQt5 import QtGui, QtWidgets
from .Scenario_Dialog_Window import Ui_Scenario_Dialog
-class Scenario_Dialog_Designer(Ui_Scenario_Dialog):
+class Scenario_Dialog_Designer(Ui_Scenario_Dialog): # noqa: D101
def __init__(self):
self._window = QtWidgets.QDialog()
self.setupUi(self._window)
@@ -20,7 +20,7 @@ def __init__(self):
)
self.probability_line.textChanged.connect(self.probabilityValidatorHelper)
- def probabilityValidatorHelper(self, text):
+ def probabilityValidatorHelper(self, text): # noqa: N802, D102
if float(text) > 1:
self.probability_line.setText(self.last_probability)
else:
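Aside from the N802/D102 suppressions, `probabilityValidatorHelper` calls `float(text)` unguarded, and even with a validator attached the line edit can be empty or hold a partial entry such as '.' mid-edit, which raises `ValueError`. A defensive sketch using the same attributes as the handler above:

    def probability_validator_helper(self, text):
        try:
            value = float(text)
        except ValueError:  # empty or partially typed input
            return
        if value > 1:
            self.probability_line.setText(self.last_probability)
        else:
            self.last_probability = text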
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Window.py
index 139897217..27dc08f30 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Scenario_Dialog_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Scenario_Dialog_Window.ui'
+# Form implementation generated from reading ui file 'Scenario_Dialog_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtWidgets
-class Ui_Scenario_Dialog:
- def setupUi(self, Scenario_Dialog):
+class Ui_Scenario_Dialog: # noqa: D101
+ def setupUi(self, Scenario_Dialog): # noqa: N802, N803, D102
Scenario_Dialog.setObjectName('Scenario_Dialog')
Scenario_Dialog.resize(351, 241)
self.buttonBox = QtWidgets.QDialogButtonBox(Scenario_Dialog)
@@ -61,7 +61,7 @@ def setupUi(self, Scenario_Dialog):
self.buttonBox.rejected.connect(Scenario_Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Scenario_Dialog)
- def retranslateUi(self, Scenario_Dialog):
+ def retranslateUi(self, Scenario_Dialog): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Scenario_Dialog.setWindowTitle(_translate('Scenario_Dialog', 'New Scenario'))
self.label.setText(_translate('Scenario_Dialog', 'Scenario Name'))
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Simulation_Tab_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Simulation_Tab_Designer.py
index 4a227c268..96a06d102 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Simulation_Tab_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Simulation_Tab_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Oct 27 19:00:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
import tempfile
@@ -9,15 +9,15 @@
from PyQt5 import QtGui, QtWidgets
-class Simulation_Tab_Designer:
+class Simulation_Tab_Designer: # noqa: D101
def __init__(self):
- """These are variables that are shared between ui and settings."""
+ """These are variables that are shared between ui and settings.""" # noqa: D401
self.setSimulationSettings(self.settings)
"""
Reassignment of shared variables.
"""
- self.result_folder_addr = os.getcwd()
+ self.result_folder_addr = os.getcwd() # noqa: PTH109
self.temp_folder_addr = tempfile.mkdtemp()
"""
@@ -49,7 +49,7 @@ def __init__(self):
self.SimulationTimeValidatorHelper
)
- def getSimulationSettings(self):
+ def getSimulationSettings(self): # noqa: N802, D102
if self.result_folder_addr == '':
self.errorMSG('REWET', 'Result folder must be provided')
return False
@@ -64,8 +64,8 @@ def getSimulationSettings(self):
elif self.multiple_radio.isChecked():
self.number_of_damages = 'multiple'
else:
- raise ValueError(
- 'Borh of Run-Type Buttons are not selected which is an error.'
+ raise ValueError( # noqa: TRY003
+ 'Neither of the Run-Type buttons is selected, which is an error.' # noqa: EM101
)
# self.result_folder_addr -- already set
# self.temp_folder_addr -- already set
@@ -75,8 +75,8 @@ def getSimulationSettings(self):
elif self.save_time_step_no_radio.isChecked():
self.save_time_step = False
else:
- raise ValueError(
- 'Both of Time-Save Buttons are not selected which is an error.'
+ raise ValueError( # noqa: TRY003
+ 'Neither of the Time-Save buttons is selected, which is an error.' # noqa: EM101
)
self.settings.process['RUN_TIME'] = self.simulation_time
@@ -88,7 +88,7 @@ def getSimulationSettings(self):
return True
- def setSimulationUI(self):
+ def setSimulationUI(self): # noqa: N802, D102
self.simulation_time_line.setText(str(int(self.simulation_time)))
self.simulation_time_step_line.setText(str(int(self.simulation_time_step)))
self.result_folder_addr_line.setText(self.result_folder_addr)
@@ -101,16 +101,16 @@ def setSimulationUI(self):
else:
raise ValueError('Unknown runtype: ' + repr(self.number_of_damages))
- if self.save_time_step == True:
+ if self.save_time_step == True: # noqa: E712
self.save_time_step_yes_radio.setChecked(True)
- elif self.save_time_step == False:
+ elif self.save_time_step == False: # noqa: E712
self.save_time_step_no_radio.setChecked(True)
else:
raise ValueError(
'Unknown time save value: ' + repr(self.save_time_step_no_radio)
)
- def setSimulationSettings(self, settings):
+ def setSimulationSettings(self, settings): # noqa: N802, D102
self.simulation_time = settings.process['RUN_TIME']
self.simulation_time_step = settings.process['simulation_time_step']
self.number_of_damages = settings.process['number_of_damages']
@@ -118,7 +118,7 @@ def setSimulationSettings(self, settings):
self.temp_folder_addr = settings.process['temp_directory']
self.save_time_step = settings.process['save_time_step']
- def ResultFileBrowserClicked(self):
+ def ResultFileBrowserClicked(self): # noqa: N802, D102
directory = QtWidgets.QFileDialog.getExistingDirectory(
self.asli_MainWindow, 'Select Directory'
)
@@ -127,7 +127,7 @@ def ResultFileBrowserClicked(self):
self.result_folder_addr = directory
self.result_folder_addr_line.setText(self.result_folder_addr)
- def tempFileBrowserClicked(self):
+ def tempFileBrowserClicked(self): # noqa: N802, D102
directory = QtWidgets.QFileDialog.getExistingDirectory(
self.asli_MainWindow, 'Select Directory'
)
@@ -136,14 +136,14 @@ def tempFileBrowserClicked(self):
self.temp_folder_addr = directory
self.temp_folder_addr_line.setText(self.temp_folder_addr)
- def SimulationTimeValidatorHelper(self, text):
+ def SimulationTimeValidatorHelper(self, text): # noqa: N802, D102
try:
simulation_time = int(float(self.simulation_time_line.text()))
- except:
+ except: # noqa: E722
simulation_time = 0
try:
simulation_time_step = int(float(self.simulation_time_step_line.text()))
- except:
+ except: # noqa: E722
simulation_time_step = 0
if text == self.simulation_time_line.text():
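The two E722 suppressions in `SimulationTimeValidatorHelper` cover bare `except:` clauses that coerce unparseable field text to 0; a bare except also swallows `KeyboardInterrupt` and `SystemExit`. A narrower sketch that would not need the suppression, assuming the only expected failure is a parse error:

    def parse_field_seconds(text):
        """Return the field value as whole seconds, or 0 if it does not parse."""
        try:
            return int(float(text))
        except (TypeError, ValueError):  # parse failures only, not a bare except
            return 0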
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Designer.py
index 816c65db4..5a97e19d0 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Designer.py
@@ -1,7 +1,7 @@
"""Created on Thu Jan 5 16:31:32 2023
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
@@ -11,12 +11,12 @@
from PyQt5 import QtWidgets
-class Subsitute_Layer_Designer(Ui_subsitite_layer_dialoge):
+class Subsitute_Layer_Designer(Ui_subsitite_layer_dialoge): # noqa: D101
def __init__(
self,
subsitute_layer_addr,
subsitute_layer,
- iUse_substitute_layer,
+ iUse_substitute_layer, # noqa: N803
demand_node_layers,
):
super().__init__()
@@ -31,7 +31,7 @@ def __init__(
self.iUse_substitute_layer = iUse_substitute_layer
self.demand_node_layers.to_file(r'Northridge\demand_node_layer.shp')
self.subsitute_layer_addr_line.setText(self.subsitute_layer_addr)
- if type(self.subsitute_layer) != type(None):
+ if type(self.subsitute_layer) != type(None): # noqa: E721
self.subsitute_layer_projection_name_line.setText(
self.subsitute_layer.crs.name
)
@@ -52,18 +52,18 @@ def __init__(
self.iUseSubstituteCheckBoxStateChanged
)
- def iUseSubstituteCheckBoxStateChanged(self, state):
+ def iUseSubstituteCheckBoxStateChanged(self, state): # noqa: N802, D102
if state == 0:
self.iUse_substitute_layer = False
- elif state == 2:
+ elif state == 2: # noqa: PLR2004
self.iUse_substitute_layer = True
- def applyNewSubsituteLayer(self):
+ def applyNewSubsituteLayer(self): # noqa: N802, D102
# demand_node_layers = self.createGeopandasPointDataFrameForNodes(self, self.wn, self.demand_node_name)
- if type(self.subsitute_layer) == type(None):
+ if type(self.subsitute_layer) == type(None): # noqa: E721
return
- def substituteLayerBrowseButton(self):
+ def substituteLayerBrowseButton(self): # noqa: N802, D102
file = QtWidgets.QFileDialog.getOpenFileName(
self._window,
'Open file',
@@ -92,7 +92,7 @@ def substituteLayerBrowseButton(self):
joined_map = gpd.sjoin(self.subsitute_layer, self.demand_node_layers)
number_list = pd.Series(index=self.demand_node_layers.index, data=0)
- for ind, val in joined_map['index_right'].iteritems():
+ for ind, val in joined_map['index_right'].iteritems(): # noqa: B007
number_list.loc[val] = number_list.loc[val] + 1
number_list = number_list[number_list > 1]
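One substantive point beyond the B007 suppression above: `Series.iteritems()` was deprecated in pandas 1.5 and removed in 2.0, so this loop breaks on current pandas. The minimal fix is the `items()` spelling; the same count can also be built without the loop. Both sketches assume `joined_map` and `number_list` as defined above:

    # minimal rename, identical behavior
    for _ind, val in joined_map['index_right'].items():
        number_list.loc[val] = number_list.loc[val] + 1

    # loop-free alternative: count matches per node, keep zero-match nodes at 0
    number_list = (
        joined_map['index_right']
        .value_counts()
        .reindex(number_list.index, fill_value=0)
    )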
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Window.py
index 9f8252279..6e6cdf1a2 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Subsitute_Layer_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Subsitute_Layer_Window.ui'
+# Form implementation generated from reading ui file 'Subsitute_Layer_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtGui, QtWidgets
-class Ui_subsitite_layer_dialoge:
- def setupUi(self, subsitite_layer_dialoge):
+class Ui_subsitite_layer_dialoge: # noqa: D101
+ def setupUi(self, subsitite_layer_dialoge): # noqa: N802, D102
subsitite_layer_dialoge.setObjectName('subsitite_layer_dialoge')
subsitite_layer_dialoge.resize(403, 407)
self.Subsitute_buttonBox = QtWidgets.QDialogButtonBox(
@@ -81,7 +81,7 @@ def setupUi(self, subsitite_layer_dialoge):
self.Subsitute_buttonBox.rejected.connect(subsitite_layer_dialoge.reject)
QtCore.QMetaObject.connectSlotsByName(subsitite_layer_dialoge)
- def retranslateUi(self, subsitite_layer_dialoge):
+ def retranslateUi(self, subsitite_layer_dialoge): # noqa: N802, D102
_translate = QtCore.QCoreApplication.translate
subsitite_layer_dialoge.setWindowTitle(
_translate('subsitite_layer_dialoge', 'Dialog')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Symbology_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Symbology_Designer.py
index 4d55b8c64..ccf65f5b0 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Symbology_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Symbology_Designer.py
@@ -1,7 +1,7 @@
"""Created on Fri Jan 6 00:08:01 2023
@author: snaeimi
-"""
+""" # noqa: N999, D400
import sys
@@ -14,7 +14,7 @@
from PyQt5 import QtCore, QtWidgets
-class Symbology_Designer(Ui_Symbology_Dialog):
+class Symbology_Designer(Ui_Symbology_Dialog): # noqa: D101
def __init__(self, sym, data, val_column):
super().__init__()
self._window = QtWidgets.QDialog()
@@ -43,9 +43,9 @@ def __init__(self, sym, data, val_column):
self.add_up_button.clicked.connect(lambda: self.addByButton('UP'))
self.add_below_button.clicked.connect(lambda: self.addByButton('DOWN'))
- self.sample_legend_widget
+ self.sample_legend_widget # noqa: B018
- def initializeForm(self):
+ def initializeForm(self): # noqa: N802, D102
self.method_combo.setCurrentText(self.sym['Method'])
if (
self.sym['Method'] == 'FisherJenks'
@@ -58,12 +58,12 @@ def initializeForm(self):
self.updateTable()
# self.updateLegendSample()
- def addByButton(self, add_location):
- to_be_added_row = None
+ def addByButton(self, add_location): # noqa: N802, D102
+ to_be_added_row = None # noqa: F841
selected_item_list = self.range_table.selectedItems()
if len(selected_item_list) == 0:
return
- else:
+ else: # noqa: RET505
selected_row = selected_item_list[0].row()
if add_location == 'UP':
@@ -96,25 +96,25 @@ def addByButton(self, add_location):
self.sym['kw'] = kw
if self.sym['Method'] != 'UserDefined':
- self.method_combo.blockSignals(True)
+ self.method_combo.blockSignals(True) # noqa: FBT003
self.sym['Method'] = 'UserDefined'
self.no_clases_line.setEnabled(False)
self.method_combo.setCurrentText('User Defined')
- self.method_combo.blockSignals(False)
+ self.method_combo.blockSignals(False) # noqa: FBT003
self.updateTable()
- def numberOfClassEditingFinished(self):
+ def numberOfClassEditingFinished(self): # noqa: N802, D102
k = float(self.no_clases_line.text())
k = int(k)
kw = {'k': k}
self.sym['kw'] = kw
self.updateTable()
- def colorChanged(self, text):
+ def colorChanged(self, text): # noqa: N802, D102
self.sym['Color'] = text
self.updateLegendSample()
- def updateLegendSample(self):
+ def updateLegendSample(self): # noqa: N802, D102
fig, ax = plt.subplots()
self.plotted_map.plot(
ax=ax,
@@ -126,8 +126,8 @@ def updateLegendSample(self):
self.legend_widget.draw()
# self.mpl_map.canvas.fig.tight_layout()
- def updateTable(self):
- self.range_table.blockSignals(True)
+ def updateTable(self): # noqa: N802, D102
+ self.range_table.blockSignals(True) # noqa: FBT003
self.clearRangeTable()
if self.sym['Method'] == 'FisherJenks':
self.class_data = mapclassify.FisherJenks(self.data, self.sym['kw']['k'])
@@ -171,15 +171,15 @@ def updateTable(self):
self.range_table.setItem(number_of_rows, 1, end_item)
self.range_table.setItem(number_of_rows, 2, count_item)
- self.range_table.blockSignals(False)
+ self.range_table.blockSignals(False) # noqa: FBT003
self.updateLegendSample()
- def clearRangeTable(self):
- for i in range(self.range_table.rowCount()):
+ def clearRangeTable(self): # noqa: N802, D102
+ for i in range(self.range_table.rowCount()): # noqa: B007
self.range_table.removeRow(0)
- def methodChanged(self, text):
- print(text)
+ def methodChanged(self, text): # noqa: N802, D102
+ print(text) # noqa: T201
if text == 'FisherJenks':
self.sym['Method'] = 'FisherJenks'
elif text == 'Equal Interval':
@@ -187,7 +187,7 @@ def methodChanged(self, text):
elif text == 'User Defined':
self.sym['Method'] = 'UserDefined'
- if text == 'FisherJenks' or text == 'Equal Interval':
+ if text == 'FisherJenks' or text == 'Equal Interval': # noqa: PLR1714
k = float(self.no_clases_line.text())
k = int(k)
kw = {'k': k}
@@ -196,20 +196,20 @@ def methodChanged(self, text):
# bins = self.getUserDefinedBins()
try:
kw = {'bins': self.bins}
- except:
+ except: # noqa: E722
kw = {'bins': self.class_data}
else:
- raise
+ raise # noqa: PLE0704
self.sym['kw'] = kw
self.updateTable()
- def currentItemChanged(self, current, previous):
- if current != None:
+ def currentItemChanged(self, current, previous): # noqa: ARG002, N802, D102
+ if current != None: # noqa: E711
self.current_item_value = float(current.text())
- print('cur ' + repr(self.current_item_value))
+ print('cur ' + repr(self.current_item_value)) # noqa: T201
- def tableDataChanged(self, item):
+ def tableDataChanged(self, item): # noqa: N802, D102
# row = item.row()
# col = item.column()
@@ -218,8 +218,8 @@ def tableDataChanged(self, item):
try:
new_item_value = float(item.text())
if new_item_value < self.data.min() or new_item_value > self.data.max():
- raise
- except:
+ raise # noqa: PLE0704
+ except: # noqa: E722
self.range_table.item(item.row(), item.column()).setText(
str(previous_item_value)
)
@@ -233,18 +233,18 @@ def tableDataChanged(self, item):
self.sym['kw'] = kw
if self.sym['Method'] != 'UserDefined':
- self.method_combo.blockSignals(True)
+ self.method_combo.blockSignals(True) # noqa: FBT003
self.sym['Method'] = 'UserDefined'
self.no_clases_line.setEnabled(False)
self.method_combo.setCurrentText('User Defined')
- self.method_combo.blockSignals(False)
+ self.method_combo.blockSignals(False) # noqa: FBT003
self.updateTable()
return
- def findBeginingRowFor(self, value):
+ def findBeginingRowFor(self, value): # noqa: N802, D102
if self.range_table.rowCount() == 0:
- raise
+ raise # noqa: PLE0704
for i in range(self.range_table.rowCount() - 1):
current_item_value = float(self.range_table.item(i, 0).text())
@@ -253,9 +253,9 @@ def findBeginingRowFor(self, value):
return i
return self.range_table.rowCount() - 1
- def findEndingRowFor(self, value):
+ def findEndingRowFor(self, value): # noqa: N802, D102
if self.range_table.rowCount() == 0:
- raise
+ raise # noqa: PLE0704
for i in range(self.range_table.rowCount() - 1):
current_item_value = float(self.range_table.item(i, 1).text())
@@ -264,21 +264,21 @@ def findEndingRowFor(self, value):
return i + 1
return self.range_table.rowCount() - 1
- def removeButtonClicked(self):
+ def removeButtonClicked(self): # noqa: N802, D102
selected_item_list = self.range_table.selectedItems()
if len(selected_item_list) == 0:
return
selected_row = selected_item_list[0].row()
self.removeRow(selected_row)
- def removeRow(self, row):
- if row == 0 and self.range_table.rowCount() >= 2:
+ def removeRow(self, row): # noqa: N802, D102
+ if row == 0 and self.range_table.rowCount() >= 2: # noqa: PLR2004
item_text = self.range_table.item(row, 0).text()
self.range_table.removeRow(0)
self.range_table.item(0, 0).setText(item_text)
elif (
row == self.range_table.rowCount() - 1
- and self.range_table.rowCount() >= 2
+ and self.range_table.rowCount() >= 2 # noqa: PLR2004
):
item_text = self.range_table.item(row, 1).text()
self.range_table.removeRow(row)
@@ -296,8 +296,8 @@ def removeRow(self, row):
if __name__ == '__main__':
symbology = {'Method': 'FisherJenks', 'kw': {'k': 5}}
s = gpd.read_file('ss2.shp')
- print(s.columns)
+ print(s.columns) # noqa: T201
app = QtWidgets.QApplication(sys.argv)
ss = Symbology_Designer(symbology, s['restoratio'])
- ss._window.show()
+ ss._window.show() # noqa: SLF001
sys.exit(app.exec_())
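Several PLE0704 suppressions in this file mark bare `raise` statements outside any `except` block; at runtime these fail with `RuntimeError: No active exception to re-raise` rather than signalling the intended condition. A sketch of the explicit form, with a hypothetical message:

    def find_beginning_row_for(range_table, value):
        if range_table.rowCount() == 0:
            # explicit exception instead of a bare `raise` (PLE0704)
            raise ValueError(f'cannot place {value!r}: the range table is empty')
        ...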
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Symbology_Window.py b/modules/systemPerformance/REWET/REWET/GUI/Symbology_Window.py
index 204da001a..23e3340b0 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Symbology_Window.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Symbology_Window.py
@@ -1,4 +1,4 @@
-# Form implementation generated from reading ui file 'Symbology_Window.ui'
+# Form implementation generated from reading ui file 'Symbology_Window.ui' # noqa: N999, D100
#
# Created by: PyQt5 UI code generator 5.12.3
#
@@ -8,8 +8,8 @@
from PyQt5 import QtCore, QtWidgets
-class Ui_Symbology_Dialog:
- def setupUi(self, Symbology_Dialog):
+class Ui_Symbology_Dialog: # noqa: D101
+ def setupUi(self, Symbology_Dialog): # noqa: N802, N803, D102
Symbology_Dialog.setObjectName('Symbology_Dialog')
Symbology_Dialog.resize(491, 410)
self.buttonBox = QtWidgets.QDialogButtonBox(Symbology_Dialog)
@@ -92,7 +92,7 @@ def setupUi(self, Symbology_Dialog):
self.buttonBox.rejected.connect(Symbology_Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Symbology_Dialog)
- def retranslateUi(self, Symbology_Dialog):
+ def retranslateUi(self, Symbology_Dialog): # noqa: N802, N803, D102
_translate = QtCore.QCoreApplication.translate
Symbology_Dialog.setWindowTitle(_translate('Symbology_Dialog', 'Dialog'))
item = self.range_table.horizontalHeaderItem(0)
diff --git a/modules/systemPerformance/REWET/REWET/GUI/Tank_Damage_Discovery_Designer.py b/modules/systemPerformance/REWET/REWET/GUI/Tank_Damage_Discovery_Designer.py
index dbd114033..b3c772eb5 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/Tank_Damage_Discovery_Designer.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/Tank_Damage_Discovery_Designer.py
@@ -1,12 +1,12 @@
"""Created on Tue Nov 1 23:25:30 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
from .Damage_Discovery_Designer import Damage_Discovery_Designer
-class Tank_Damage_Discovery_Designer(Damage_Discovery_Designer):
+class Tank_Damage_Discovery_Designer(Damage_Discovery_Designer): # noqa: D101
def __init__(self, tank_damage_discovery_model):
super().__init__(tank_damage_discovery_model)
self._window.setWindowTitle('Tank Damage Discovery')
diff --git a/modules/systemPerformance/REWET/REWET/GUI/__init__.py b/modules/systemPerformance/REWET/REWET/GUI/__init__.py
index e69de29bb..b74acee6d 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/__init__.py
@@ -0,0 +1 @@
+# noqa: N999, D104
diff --git a/modules/systemPerformance/REWET/REWET/GUI/resources/REWET_Resource_rc.py b/modules/systemPerformance/REWET/REWET/GUI/resources/REWET_Resource_rc.py
index 6f3308008..4589a32af 100644
--- a/modules/systemPerformance/REWET/REWET/GUI/resources/REWET_Resource_rc.py
+++ b/modules/systemPerformance/REWET/REWET/GUI/resources/REWET_Resource_rc.py
@@ -1,4 +1,4 @@
-# Resource object code
+# Resource object code # noqa: INP001, D100
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.12.9)
#
@@ -717,13 +717,13 @@
qt_resource_struct = qt_resource_struct_v2
-def qInitResources():
+def qInitResources(): # noqa: N802, D103
QtCore.qRegisterResourceData(
rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
)
-def qCleanupResources():
+def qCleanupResources(): # noqa: N802, D103
QtCore.qUnregisterResourceData(
rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data
)
diff --git a/modules/systemPerformance/REWET/REWET/Input/GUI_Input_Interface.py b/modules/systemPerformance/REWET/REWET/Input/GUI_Input_Interface.py
index 2c4b8a36e..c997df316 100644
--- a/modules/systemPerformance/REWET/REWET/Input/GUI_Input_Interface.py
+++ b/modules/systemPerformance/REWET/REWET/Input/GUI_Input_Interface.py
@@ -4,14 +4,14 @@
inputs and the mail code.
@author: snaeimi
-"""
+""" # noqa: INP001, D400
-class input:
+class input: # noqa: A001, D101
def __init__(self, settings, registry):
pass
- def convertShiftFromDictToPandasTable(self, dict_data):
- shift_name_list = list(shift_data)
- shift_begining_list = [shift_data[i][0] for i in shift_name_list]
- shift_end_list = [shift_data[i][1] for i in shift_name_list]
+ def convertShiftFromDictToPandasTable(self, dict_data): # noqa: ARG002, N802, D102
+ shift_name_list = list(shift_data) # noqa: F821
+ shift_begining_list = [shift_data[i][0] for i in shift_name_list] # noqa: F821, F841
+ shift_end_list = [shift_data[i][1] for i in shift_name_list] # noqa: F821, F841
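The F821/F841 suppressions here document that the method reads an undefined name: the parameter is `dict_data` (hence ARG002, unused argument), but the body references `shift_data`, and the lists it builds are discarded. A sketch of what the conversion presumably intends, returning the table it builds; the column names are assumptions:

    import pandas as pd

    def convert_shift_dict_to_table(dict_data):
        """Build a shift table from {name: (begin, end)} pairs."""
        names = list(dict_data)
        return pd.DataFrame(
            {
                'begin': [dict_data[n][0] for n in names],
                'end': [dict_data[n][1] for n in names],
            },
            index=names,
        )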
diff --git a/modules/systemPerformance/REWET/REWET/Input/Input_IO.py b/modules/systemPerformance/REWET/REWET/Input/Input_IO.py
index ceb97a9b1..8260a9a72 100644
--- a/modules/systemPerformance/REWET/REWET/Input/Input_IO.py
+++ b/modules/systemPerformance/REWET/REWET/Input/Input_IO.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import os
import pickle
@@ -27,13 +27,13 @@ def read_pipe_damage_seperate_json_file(directory, pipe_file_name):
pipe_damaage = []
pipe_time = []
- file_dest = os.path.join(directory, pipe_file_name)
+ file_dest = os.path.join(directory, pipe_file_name) # noqa: PTH118
- with open(file_dest) as f:
+ with open(file_dest) as f: # noqa: PTH123
read_file = json.load(f)
if not isinstance(read_file, list):
- raise ValueError('Wrong input in PIPE damage file')
+ raise ValueError('Wrong input in PIPE damage file') # noqa: EM101, TRY003, TRY004
for each_damage in read_file:
pipe_time.append(each_damage.get('time'))
@@ -71,13 +71,13 @@ def read_node_damage_seperate_json_file(directory, node_file_name):
node_damage = []
node_time = []
- file_dest = os.path.join(directory, node_file_name)
+ file_dest = os.path.join(directory, node_file_name) # noqa: PTH118
- with open(file_dest) as f:
+ with open(file_dest) as f: # noqa: PTH123
read_file = json.load(f)
if not isinstance(read_file, list):
- raise ValueError('Wrong input in NODE damage file')
+ raise ValueError('Wrong input in NODE damage file') # noqa: EM101, TRY003, TRY004
for each_damage in read_file:
node_time.append(each_damage.get('time'))
@@ -114,13 +114,13 @@ def read_tank_damage_seperate_json_file(directory, tank_file_name):
tank_damage = []
tank_time = []
- file_dest = os.path.join(directory, tank_file_name)
+ file_dest = os.path.join(directory, tank_file_name) # noqa: PTH118
- with open(file_dest) as f:
+ with open(file_dest) as f: # noqa: PTH123
read_file = json.load(f)
if not isinstance(read_file, list):
- raise ValueError('Wrong input in TANK damage file')
+ raise ValueError('Wrong input in TANK damage file') # noqa: EM101, TRY003, TRY004
for each_damage in read_file:
tank_time.append(each_damage.get('time'))
@@ -156,13 +156,13 @@ def read_pump_damage_seperate_json_file(directory, pump_file_name):
pump_damage = []
pump_time = []
- file_dest = os.path.join(directory, pump_file_name)
+ file_dest = os.path.join(directory, pump_file_name) # noqa: PTH118
- with open(file_dest) as f:
+ with open(file_dest) as f: # noqa: PTH123
read_file = json.load(f)
if not isinstance(read_file, list):
- raise ValueError('Wrong input in PUMP damage file')
+ raise ValueError('Wrong input in PUMP damage file') # noqa: EM101, TRY003, TRY004
for each_damage in read_file:
pump_time.append(each_damage.get('time'))
@@ -178,94 +178,94 @@ def read_pump_damage_seperate_json_file(directory, pump_file_name):
# Read files From Pickle #####################
-def read_pipe_damage_seperate_pickle_file(directory, all_damages_file_name):
- file_dest = os.path.join(directory, all_damages_file_name)
- with open(file_dest, 'rb') as f:
- _all_damages = pickle.load(f)
+def read_pipe_damage_seperate_pickle_file(directory, all_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, all_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _all_damages = pickle.load(f) # noqa: S301
- return _all_damages
+ return _all_damages # noqa: RET504
-def read_node_damage_seperate_pickle_file(directory, all_damages_file_name):
- file_dest = os.path.join(directory, all_damages_file_name)
- with open(file_dest, 'rb') as f:
- _node_damages = pickle.load(f)
+def read_node_damage_seperate_pickle_file(directory, all_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, all_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _node_damages = pickle.load(f) # noqa: S301
- return _node_damages
+ return _node_damages # noqa: RET504
-def read_tank_damage_seperate_pickle_file(directory, tank_damages_file_name):
- file_dest = os.path.join(directory, tank_damages_file_name)
- with open(file_dest, 'rb') as f:
- _tank_damages = pickle.load(f)
+def read_tank_damage_seperate_pickle_file(directory, tank_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, tank_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _tank_damages = pickle.load(f) # noqa: S301
- return _tank_damages
+ return _tank_damages # noqa: RET504
-def read_pump_damage_seperate_pickle_file(directory, pump_damages_file_name):
- file_dest = os.path.join(directory, pump_damages_file_name)
- with open(file_dest, 'rb') as f:
- _pump_damages = pickle.load(f)
+def read_pump_damage_seperate_pickle_file(directory, pump_damages_file_name): # noqa: D103
+ file_dest = os.path.join(directory, pump_damages_file_name) # noqa: PTH118
+ with open(file_dest, 'rb') as f: # noqa: PTH123
+ _pump_damages = pickle.load(f) # noqa: S301
- return _pump_damages
+ return _pump_damages # noqa: RET504
# Read files From Excel #####################
-def read_pipe_damage_seperate_EXCEL_file(directory, pipe_damages_file_name):
+def read_pipe_damage_seperate_EXCEL_file(directory, pipe_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, pipe_damages_file_name)
+ file_dest = os.path.join(directory, pipe_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
ss.sort_values(
['pipe_id', 'time', 'damage_loc'],
ascending=[True, True, False],
ignore_index=True,
- inplace=True,
+ inplace=True, # noqa: PD002
)
unique_time = ss.groupby(['pipe_id']).time.unique()
if 1 in [
0 if len(i) <= 1 else 1 for i in unique_time
]: # checks if there are any pipe id with more than two unique time values
- raise ValueError(
- 'All damage location for one pipe should happen at the same time'
+ raise ValueError( # noqa: TRY003
+ 'All damage location for one pipe should happen at the same time' # noqa: EM101
)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.pipe_id = ss.pipe_id.astype(str)
return pd.Series(ss.to_dict('records'), index=ss.index)
-def read_node_damage_seperate_EXCEL_file(directory, node_damages_file_name):
+def read_node_damage_seperate_EXCEL_file(directory, node_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, node_damages_file_name)
+ file_dest = os.path.join(directory, node_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.node_name = ss.node_name.astype(str)
return pd.Series(ss.to_dict('records'), index=ss.index)
-def read_tank_damage_seperate_EXCEL_file(directory, tank_damages_file_name):
+def read_tank_damage_seperate_EXCEL_file(directory, tank_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, tank_damages_file_name)
+ file_dest = os.path.join(directory, tank_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
# ss.set_index('Tank_ID', inplace=True)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.Tank_ID = ss.Tank_ID.astype(str)
# ss = ss['Tank_ID']
return ss
-def read_pump_damage_seperate_EXCEL_file(directory, pump_damages_file_name):
+def read_pump_damage_seperate_EXCEL_file(directory, pump_damages_file_name): # noqa: N802, D103
ss = None
- file_dest = os.path.join(directory, pump_damages_file_name)
+ file_dest = os.path.join(directory, pump_damages_file_name) # noqa: PTH118
ss = pd.read_excel(file_dest)
- ss.set_index('time', inplace=True)
+ ss.set_index('time', inplace=True) # noqa: PD002
ss.Pump_ID = ss.Pump_ID.astype(str)
return ss
-def read_damage_list(list_file_addr, file_directory, iCheck=False):
+def read_damage_list(list_file_addr, file_directory, iCheck=False): # noqa: FBT002, ARG001, N803
"""Reads damage scenario list.
Parameters
@@ -287,22 +287,22 @@ def read_damage_list(list_file_addr, file_directory, iCheck=False):
damage_list : Pandas Dataframe
DESCRIPTION.
- """
+ """ # noqa: D401
damage_list = None
error_file_name = []
- with open(list_file_addr, 'rb') as f:
+ with open(list_file_addr, 'rb') as f: # noqa: PTH123
damage_list = pd.read_excel(f)
- iError = False
+ iError = False # noqa: N806
temp = damage_list['Pipe Damage'].tolist()
- if iCheck == False:
+ if iCheck == False: # noqa: E712
return damage_list
for file_name in temp:
- if not os.path.exists(file_name):
- iError = True
+ if not os.path.exists(file_name): # noqa: PTH110
+ iError = True # noqa: N806
error_file_name.append(file_name)
if iError:
@@ -315,15 +315,15 @@ def read_damage_list(list_file_addr, file_directory, iCheck=False):
# Save Results #####################
-def save_single(settings, result, name, restoration_data):
+def save_single(settings, result, name, restoration_data): # noqa: D103
result_file_directory = settings.process['result_directory']
# print(result_file_directory)
result_name = name + '.res'
settings_name = name + '.xlsx'
- file_dest = os.path.join(result_file_directory, result_name)
- print('Saving: ' + str(file_dest))
- with open(file_dest, 'wb') as f:
+ file_dest = os.path.join(result_file_directory, result_name) # noqa: PTH118
+ print('Saving: ' + str(file_dest)) # noqa: T201
+ with open(file_dest, 'wb') as f: # noqa: PTH123
pickle.dump(result, f)
process_set = pd.Series(settings.process.settings)
@@ -332,14 +332,14 @@ def save_single(settings, result, name, restoration_data):
process_set.to_list() + scenario_set.to_list(),
index=process_set.index.to_list() + scenario_set.index.to_list(),
)
- file_dest = os.path.join(result_file_directory, settings_name)
+ file_dest = os.path.join(result_file_directory, settings_name) # noqa: PTH118
_set.to_excel(file_dest)
if settings.process['dmg_rst_data_save']:
# file_dest = os.path.join(result_file_directory, 'restoration_file.pkl')
# rest_data_out = pd.DataFrame.from_dict(restoration_data)
# rest_data_out.to_pickle(file_dest)
- file_dest = os.path.join(result_file_directory, name + '_registry.pkl')
- print('Saving: ' + str(file_dest))
- with open(file_dest, 'wb') as f:
+ file_dest = os.path.join(result_file_directory, name + '_registry.pkl') # noqa: PTH118
+ print('Saving: ' + str(file_dest)) # noqa: T201
+ with open(file_dest, 'wb') as f: # noqa: PTH123
pickle.dump(restoration_data, f)
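Two recurring suppressions in Input_IO.py are worth spelling out. PTH118/PTH123 point at the pathlib equivalents of `os.path.join` plus `open`, sketched below with a hypothetical helper. S301 is substantive rather than stylistic: `pickle.load` can execute arbitrary code from the file, so `.res`/`.pkl` inputs should only ever come from runs REWET produced itself.

    import pickle
    from pathlib import Path

    def read_damage_pickle(directory, file_name):
        file_dest = Path(directory) / file_name  # replaces os.path.join (PTH118)
        with file_dest.open('rb') as f:          # replaces open(...) (PTH123)
            return pickle.load(f)  # noqa: S301 -- trusted, self-produced files only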
diff --git a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py
index 772de816f..36176a055 100644
--- a/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py
+++ b/modules/systemPerformance/REWET/REWET/Input/Policy_IO.py
@@ -3,7 +3,7 @@
This is the Restoration Policy Reader/Writtter Module.
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import logging
from collections import OrderedDict
@@ -32,7 +32,7 @@ def _split_line(line):
return _vals, _cmnt
-class restoration_data:
+class restoration_data: # noqa: D101
def __init__(self):
self.files = {}
self.shift = {}
@@ -52,7 +52,7 @@ def __init__(self):
self.group[el] = OrderedDict()
-class RestorationIO:
+class RestorationIO: # noqa: D101
def __init__(self, definition_file_name):
"""Needs a file that contains:
@@ -67,7 +67,7 @@ def __init__(self, definition_file_name):
-------
None.
- """
+ """ # noqa: D400
# some of the following lines have been adopted from WNTR
self.rm = restoration_data()
@@ -98,23 +98,23 @@ def __init__(self, definition_file_name):
section = None
lnum = 0
edata = {'fname': definition_file_name}
- with open(definition_file_name, encoding='utf-8') as f:
+ with open(definition_file_name, encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
edata['lnum'] = lnum
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.startswith('['):
+ elif line.startswith('['): # noqa: RET507
vals = line.split()
sec = vals[0].upper()
edata['sec'] = sec
if sec in expected_sections:
section = sec
continue
- else:
+ else: # noqa: RET507
raise RuntimeError(
'%(fname)s:%(lnum)d: Invalid section "%(sec)s"' % edata
)
@@ -150,7 +150,7 @@ def _read_files(self):
edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 2:
+ if len(words) != 2: # noqa: PLR2004
edata['key'] = words[0]
raise RuntimeError(
'%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
@@ -167,30 +167,30 @@ def _read_files(self):
def _read_each_file(self, file_address, method=0):
lnum = 0
- iTitle = True
+ iTitle = True # noqa: N806
data_temp = None
if method == 0:
try:
- raise
- with open(file_address, encoding='utf-8') as f:
+ raise # noqa: PLE0704
+ with open(file_address, encoding='utf-8') as f: # noqa: PTH123
for line in f:
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.startswith(';'):
+ elif line.startswith(';'): # noqa: RET507
# comment
continue
else:
lnum += 1
vals = line.split()
- if iTitle == True:
- iTitle = False
+ if iTitle == True: # noqa: E712
+ iTitle = False # noqa: N806
data_temp = pd.DataFrame(columns=vals)
else:
data_temp.loc[lnum - 2] = vals
- except:
+ except: # noqa: E722
data_temp = self._read_each_file(file_address, method=1)
elif method == 1:
data_temp = pd.read_csv(file_address)
@@ -199,13 +199,13 @@ def _read_each_file(self, file_address, method=0):
return data_temp
def _read_shifts(self):
- for lnum, line in self.sections['[SHIFTS]']:
+ for lnum, line in self.sections['[SHIFTS]']: # noqa: B007
# edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 3:
- raise RuntimeError(
- '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
+ if len(words) != 3: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s' # noqa: EM101
)
shift_name = words[0]
shift_begining = int(words[1]) * 3600
@@ -213,7 +213,7 @@ def _read_shifts(self):
self.rm.shift[shift_name] = (shift_begining, shift_ending)
- def _read_entities(self):
+ def _read_entities(self): # noqa: C901
"""Reads damage group definitions and updates the Restoration Model
object data.
@@ -230,7 +230,7 @@ def _read_entities(self):
-------
None.
- """
+ """ # noqa: D205, D401
# Entities is kept for legacy compatibility with the first version
damage_group_data = self.sections.get(
'[ENTITIES]', self.sections.get('[Damage Group]')
@@ -241,9 +241,9 @@ def _read_entities(self):
arg2 = None
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 2 and len(words) != 4:
- raise RuntimeError(
- '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
+ if len(words) != 2 and len(words) != 4: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s' # noqa: EM101
)
entity_name = words[0]
element = words[1].upper()
@@ -254,14 +254,14 @@ def _read_entities(self):
# if entity_name in self.rm.entity:
# raise ValueError('Entity already defined')
- if len(words) == 4:
+ if len(words) == 4: # noqa: PLR2004
arg1 = words[2]
arg2 = words[3]
# if (element=='PIPE' and arg1 not in self.rm._registry._pipe_damage_table.columns and arg1!='FILE' and arg1!='NOT_IN_FILE') and (element=='DISTNODE' and arg1 not in self.rm._registry._node_damage_table.columns):
# raise ValueError('Argument 1('+arg1+') is not recognized in line number: ' + str(lnum))
- if arg1 == None:
+ if arg1 == None: # noqa: E711
self.rm.entity[entity_name] = element
ent_rule = [('ALL', None, None)]
@@ -273,7 +273,7 @@ def _read_entities(self):
# sina: take care of this in registry opening
# self.rm._registry.addAttrToElementDamageTable(element ,entity_name , True)
- elif arg1 == 'FILE' or arg1 == 'NOT_IN_FILE':
+ elif arg1 == 'FILE' or arg1 == 'NOT_IN_FILE': # noqa: PLR1714
name_list = self.rm.files[arg2]['ElementID'].unique().tolist()
ent_rule = [(arg1, None, name_list)]
self.rm.entity[entity_name] = element
@@ -288,7 +288,7 @@ def _read_entities(self):
if ':' in arg2:
split_arg = arg2.split(':')
- if len(split_arg) != 2:
+ if len(split_arg) != 2: # noqa: PLR2004
raise ValueError(
'There must be two parts: PART1:PART2. Now there are '
+ repr(
@@ -317,7 +317,7 @@ def _read_entities(self):
try:
temp_arg3 = float(arg3)
- except:
+ except: # noqa: E722
temp_arg3 = str(arg3)
arg3 = temp_arg3
@@ -336,7 +336,7 @@ def _read_entities(self):
def _read_sequences(self):
# sina: there is a part that you need to add in restroation init
- for lnum, line in self.sections['[SEQUENCES]']:
+ for lnum, line in self.sections['[SEQUENCES]']: # noqa: B007
words, comments = _split_line(line)
if words is not None and len(words) > 0:
# if len(words) != 2 or len(words)!=4:
@@ -344,9 +344,9 @@ def _read_sequences(self):
element = words[0].upper()
seq = []
for arg in words[1:]:
- seq.append(arg)
+ seq.append(arg) # noqa: PERF402
if element in self.rm.sequence:
- raise ValueError('Element already in sequences')
+ raise ValueError('Element already in sequences') # noqa: EM101, TRY003
if element not in ELEMENTS:
raise ValueError(
'The Element '
@@ -361,23 +361,23 @@ def _read_agents(self):
group_column = {}
crews_data = self.sections.get('[AGENTS]', self.sections.get('CREWS'))
- for lnum, line in crews_data:
+ for lnum, line in crews_data: # noqa: B007
# edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
_group_name = None
_group_column = None
- if len(words) < 3:
- raise RuntimeError(
- 'less than three argument is not valid for crew definition'
+ if len(words) < 3: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ 'less than three argument is not valid for crew definition' # noqa: EM101
)
agent_type = words[0]
if words[1].upper() == 'FILE':
agent_file_handle[words[0]] = words[2]
else:
- raise ValueError('Unknown key')
- if len(words) >= 4:
+ raise ValueError('Unknown key') # noqa: EM101, TRY003
+ if len(words) >= 4: # noqa: PLR2004
group_data = words[3]
_group_name = group_data.split(':')[0]
_group_column = group_data.split(':')[1]
@@ -393,7 +393,7 @@ def _read_agents(self):
agent_number = data['Number']
j = 0
- for lnum, line in data.iterrows():
+ for lnum, line in data.iterrows(): # noqa: B007
# try:
num = int(agent_number[j])
# except :
@@ -412,7 +412,7 @@ def _read_agents(self):
definitions['shift_name'] = predefinitions['Shift']
group_name_temp = None
- if group_names[agent_type] != None:
+ if group_names[agent_type] != None: # noqa: E711
definitions['group'] = predefinitions[
group_column[agent_type]
]
@@ -423,21 +423,21 @@ def _read_agents(self):
definitions['group_name'] = group_name_temp
self.rm.agents.append((agent_name, agent_type, definitions))
- j += 1
+ j += 1 # noqa: SIM113
def _read_groups(self):
for lnum, line in self.sections['[GROUPS]']:
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if not len(words) >= 6:
+ if not len(words) >= 6: # noqa: PLR2004
raise ValueError('error in line: ' + str(lnum))
group_name = words[0]
element_type = words[1]
argument = words[2]
file_handler = words[3]
- element_col_ID = words[4]
- pipe_col_ID = words[5]
+ element_col_ID = words[4] # noqa: N806
+ pipe_col_ID = words[5] # noqa: N806
if element_type not in ELEMENTS:
raise ValueError(
@@ -481,21 +481,21 @@ def _read_groups(self):
self.rm.group[element_type][group_name] = group_list
- def _read_priorities(self):
+ def _read_priorities(self): # noqa: C901
for lnum, line in self.sections['[PRIORITIES]']:
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if not len(words) >= 3:
+ if not len(words) >= 3: # noqa: PLR2004
raise ValueError('error in line: ' + str(lnum))
agent_type = words[0]
priority = None
try:
priority = int(words[1])
- except:
- print('exeption handled in _read_priorities')
- if type(priority) != int:
+ except: # noqa: E722
+ print('exception handled in _read_priorities') # noqa: T201
+ if type(priority) != int: # noqa: E721
raise ValueError(
'Priority casting failed:'
+ str(priority)
@@ -504,7 +504,7 @@ def _read_priorities(self):
)
arg = []
for word in words[2:]:
- temp = None
+ temp = None # noqa: F841
if word.find(':') != -1:
split_temp = word.split(':')
arg.append((split_temp[0], split_temp[1]))
@@ -537,7 +537,7 @@ def _read_jobs(self):
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if not len(words) >= 3:
+ if not len(words) >= 3: # noqa: PLR2004
raise ValueError(
'Not enough arguments. error in line: ' + str(lnum)
)
@@ -566,19 +566,19 @@ def _read_jobs(self):
if definer.upper() == 'FIXED':
try:
argument = int(argument)
- except:
- print('exeption handled in _read_jobs')
+ except: # noqa: E722
+ print('exception handled in _read_jobs') # noqa: T201
else:
raise ValueError('Definer is not recognized: ' + definer)
effect = None
- if len(words) >= 4:
+ if len(words) >= 4: # noqa: PLR2004
effect = words[3]
self.rm.jobs.append((agent_type, entity, action, argument, effect))
- def _read_define(self):
- job = {}
+ def _read_define(self): # noqa: C901, PLR0912
+ job = {} # noqa: F841
effect_data = self.sections.get('[DEFINE]', self.sections.get('[EFFECTS]'))
for lnum, line in effect_data:
@@ -590,7 +590,7 @@ def _read_define(self):
try:
method_name = float(words[1])
- except:
+ except: # noqa: E722
method_name = words[1]
res_list = []
@@ -624,15 +624,15 @@ def _read_define(self):
if main_arg == 'RECONNECT':
if arg == 'PIPESIZE':
if 'PIPESIZEFACTOR' in res:
- raise ValueError(
- 'Either pipe size or pipe size factor can be defined'
+ raise ValueError( # noqa: TRY003
+ 'Either pipe size or pipe size factor can be defined' # noqa: EM101
)
res['PIPESIZE'] = float(val)
elif arg == 'PIPESIZEFACTOR':
if 'PIPESIZE' in res:
- raise ValueError(
- 'Either pipe size or pipe size factor can be defined'
+ raise ValueError( # noqa: TRY003
+ 'Either pipe size or pipe size factor can be defined' # noqa: EM101
)
val = float(val)
if val > 1 or val < 0:
@@ -642,9 +642,9 @@ def _read_define(self):
)
res['PIPESIZEFACTOR'] = float(val)
elif arg == 'CV':
- if val == 'TRUE' or val == '1':
+ if val == 'TRUE' or val == '1': # noqa: PLR1714
val = True
- elif val == 'FALSE' or val == '0':
+ elif val == 'FALSE' or val == '0': # noqa: PLR1714
val = False
else:
raise ValueError(
@@ -659,21 +659,21 @@ def _read_define(self):
res['CV'] = val
elif arg == 'PIPELENGTH':
try:
- val == float(val)
+ val == float(val) # noqa: B015
except Exception as e:
- print(
+ print( # noqa: T201
'The value for PIPELENGTH must be a number'
)
- raise e
+ raise e # noqa: TRY201
res['PIPELENGTH'] = val
elif arg == 'PIPEFRICTION':
try:
- val == float(val)
+ val == float(val) # noqa: B015
except Exception as e:
- print(
+ print( # noqa: T201
'The value for PIPEFRICTION must be a number'
)
- raise e
+ raise e # noqa: TRY201
res['PIPEFRICTION'] = val
else:
raise ValueError(
@@ -687,9 +687,9 @@ def _read_define(self):
res['PUMP'] = float(val)
elif arg == 'CV':
- if val == 'TRUE' or val == '1':
+ if val == 'TRUE' or val == '1': # noqa: PLR1714
val = True
- elif val == 'FALSE' or val == '0':
+ elif val == 'FALSE' or val == '0': # noqa: PLR1714
val = False
else:
raise ValueError(
@@ -730,8 +730,8 @@ def _read_define(self):
)
elif main_arg == 'COL_CLOSE_PIPE':
- raise ValueError(
- 'REPAIR at this stage does not accept any argument'
+ raise ValueError( # noqa: TRY003
+ 'REPAIR at this stage does not accept any argument' # noqa: EM101
)
elif main_arg == 'ISOLATE_DN':
@@ -741,7 +741,7 @@ def _read_define(self):
or val[-1] != ')'
or val.find(',') == -1
):
- ValueError(
+ ValueError( # noqa: PLW0133
'After PIDR the format must be like (CONDITION,VALUE)'
)
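The `# noqa: PLR1714` suppressions in the hunks above keep chained equality tests such as `val == 'TRUE' or val == '1'`. The merged form the rule points at is a membership test; a minimal sketch of the same TRUE/FALSE/1/0 parsing (the `parse_bool` helper is an illustrative name, not part of REWET):

```python
def parse_bool(val):
    """Parse the TRUE/FALSE/1/0 strings accepted by the [DEFINE] reader."""
    if val in ('TRUE', '1'):  # merged form of: val == 'TRUE' or val == '1'
        return True
    if val in ('FALSE', '0'):
        return False
    msg = f'Boolean value is not recognized: {val!r}'
    raise ValueError(msg)


print(parse_bool('TRUE'))  # -> True
```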
@@ -751,7 +751,7 @@ def _read_define(self):
_con_val = float(val_split[1])
if not (
- _con == 'BG'
+ _con == 'BG' # noqa: PLR1714
or _con == 'EQ'
or _con == 'LT'
or _con == 'BG-EQ'
@@ -770,14 +770,14 @@ def _read_define(self):
res['PIDR'] = (_con, _con_val)
elif main_arg == 'REPAIR':
- raise ValueError(
- 'REPAIR at this stage does not accept any argument'
+ raise ValueError( # noqa: TRY003
+ 'REPAIR at this stage does not accept any argument' # noqa: EM101
)
elif method_name.upper() == 'DEFAULT':
- try:
+ try: # noqa: SIM105
arg = int(arg)
- except:
+ except: # noqa: S110, E722
pass
if main_arg == 'METHOD_PROBABILITY':
@@ -785,15 +785,15 @@ def _read_define(self):
if val < 0:
raise ValueError(
- 'Probability cannot be less than zero. '
+ 'Probability cannot be less than zero. ' # noqa: ISC003
+ ' In line '
+ lnum
+ ' probability: '
+ val
)
- elif val > 1:
+ elif val > 1: # noqa: RET506
raise ValueError(
- 'Probability cannot be bigger than 1. '
+ 'Probability cannot be bigger than 1. ' # noqa: ISC003
+ ' In line '
+ lnum
+ ' probability: '
@@ -812,16 +812,16 @@ def _read_define(self):
val = None
else:
val = None
- print(
+ print( # noqa: T201
'WARNING: At default line in FINAL section, the third argument is not NULL: '
+ str(val)
+ ' The value is ignored anyway'
)
self.rm.final_method[job_name] = arg
elif main_arg == 'ONLYONCE':
- try:
+ try: # noqa: SIM105
val = float(val)
- except:
+ except: # noqa: S110, E722
pass
if job_name in self.rm.once:
@@ -847,7 +847,7 @@ def _read_define(self):
i += 2
res_list.append(res)
- if flag == False:
+ if flag == False: # noqa: E712
self.rm.jobs.append((job_name, method_name, res_list))
# for self.rm.effects.pruneData()
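Several hunks above add `# noqa: SIM105` to a `try`/`except`/`pass` wrapped around an `int()` or `float()` coercion. The one-liner SIM105 suggests is `contextlib.suppress`; a sketch of the equivalent:

```python
import contextlib

arg = '42'
with contextlib.suppress(ValueError, TypeError):  # replaces try/except/pass
    arg = int(arg)
print(arg)  # -> 42
```

The suppression also silences S110 (swallowed exception), so the `noqa` route keeps the original behavior on record rather than rewriting it.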
@@ -875,7 +875,7 @@ def _read_file_effect(self, file_info, effect_name):
if val not in data.columns:
raise ValueError('Value not in file: ' + val)
if (
- arg == 'ELEMENT_NAME'
+ arg == 'ELEMENT_NAME' # noqa: PLR1714
or arg == 'METHOD_NAME'
or arg == 'METHOD_PROBABILITY'
):
@@ -906,69 +906,69 @@ def _read_file_effect(self, file_info, effect_name):
raise ValueError('Unrecognized argument in pair: ' + _arg)
res = pd.DataFrame(res)
# print(res)
- return res
+ return res # noqa: RET504
def _read_demand_nodes(self):
- titles = []
+ titles = [] # noqa: F841
ntitle = 0
lnum = 0
dtemp = []
- with open(self._demand_Node_file_name, encoding='utf-8') as f:
+ with open(self._demand_Node_file_name, encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
words = line.split()
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.upper().startswith('NODEID'):
+ elif line.upper().startswith('NODEID'): # noqa: RET507
title = words.copy()
ntitle = len(
words
) # we need this to confirm that every line has data for every title(column)
continue
elif nwords != ntitle:
- raise ValueError(
- '%{fname}s:%(lnum)d: Number of data does not match number of titles'
+ raise ValueError( # noqa: TRY003
+ '%{fname}s:%(lnum)d: Number of data fields does not match number of titles' # noqa: EM101
)
elif nwords == ntitle:
dtemp.append(words)
else:
- raise ValueError(
- '%{fname}s:%(lnum)d:This error must nnever happen'
+ raise ValueError( # noqa: TRY003
+ '%{fname}s:%(lnum)d: This error must never happen' # noqa: EM101
)
self.demand_node = pd.DataFrame(dtemp, columns=title)
def _read_crew(self):
- titles = []
+ titles = [] # noqa: F841
ntitle = 0
lnum = 0
dtemp = []
- with open(self._crew_file_name[-1], encoding='utf-8') as f:
+ with open(self._crew_file_name[-1], encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
words = line.split()
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.upper().startswith('DISTYARDID'):
+ elif line.upper().startswith('DISTYARDID'): # noqa: RET507
title = words.copy()
ntitle = len(
words
) # we need this to confirm that every line has data for every title(column)
continue
elif nwords != ntitle:
- raise ValueError(
- '%{fname}s:%(lnum)d: Number of data does not match number of titles'
+ raise ValueError( # noqa: TRY003
+ '%{fname}s:%(lnum)d: Number of data fields does not match number of titles' # noqa: EM101
)
elif nwords == ntitle:
dtemp.append(words)
else:
- raise ValueError(
- '%{fname}s:%(lnum)d:This error must nnever happen'
+ raise ValueError( # noqa: TRY003
+ '%{fname}s:%(lnum)d: This error must never happen' # noqa: EM101
)
self.crew_data[self._crew_file_type[-1]] = pd.DataFrame(
dtemp, columns=title
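`_read_demand_nodes` and `_read_crew` above share the same header-then-rows parser for whitespace-delimited files, and both raise messages like `'%{fname}s:%(lnum)d: ...'` that mix two %-formatting styles and are never actually interpolated. Below is a minimal sketch of the shared pattern with a working f-string message; `read_table` and its arguments are illustrative names, not REWET API:

```python
import pandas as pd


def read_table(file_name, header_keyword):
    """Parse a whitespace-delimited file whose header row starts with header_keyword."""
    title, rows = None, []
    with open(file_name, encoding='utf-8') as f:
        for lnum, line in enumerate(f, start=1):
            words = line.split()
            if not words:
                continue  # skip blank lines
            if line.strip().upper().startswith(header_keyword):
                title = words.copy()  # the header row defines the columns
            elif title is None or len(words) != len(title):
                msg = f'{file_name}:{lnum}: number of data fields does not match number of titles'
                raise ValueError(msg)
            else:
                rows.append(words)
    return pd.DataFrame(rows, columns=title)
```

Called as, say, `read_table('demand_nodes.txt', 'NODEID')`, this mirrors `_read_demand_nodes`; `'DISTYARDID'` would mirror `_read_crew`.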
diff --git a/modules/systemPerformance/REWET/REWET/Input/Settings.py b/modules/systemPerformance/REWET/REWET/Input/Settings.py
index 40a97b8f3..25eadd019 100644
--- a/modules/systemPerformance/REWET/REWET/Input/Settings.py
+++ b/modules/systemPerformance/REWET/REWET/Input/Settings.py
@@ -1,4 +1,4 @@
-import json
+import json # noqa: INP001, D100
import pickle
import warnings
@@ -17,18 +17,18 @@
acceptable_override_list = ['POINTS']
-class base:
+class base: # noqa: D101
def __init__(self):
self.settings = {}
- def __getitem__(self, key):
+ def __getitem__(self, key): # noqa: D105
return self.settings[key]
- def __setitem__(self, key, data):
+ def __setitem__(self, key, data): # noqa: D105
self.settings[key] = data
-class Process_Settings(base):
+class Process_Settings(base): # noqa: D101
def __init__(self):
super().__init__()
"""
@@ -123,7 +123,7 @@ def __init__(self):
self.settings['limit_result_file_size'] = -1 # in Mb. 0 means no limit
-class Scenario_Settings(base):
+class Scenario_Settings(base): # noqa: D101
def __init__(self):
super().__init__()
"""
@@ -204,7 +204,7 @@ def __init__(self):
# Sina, there is no x in the GUI. Implement it
"""
Restoration settings
- """
+ """ # noqa: W291
self.settings['Restoraion_policy_type'] = (
'script' # sina needs to be implemented in the code
)
@@ -248,13 +248,13 @@ def __init__(self):
self.settings['pipe_damage_diameter_factor'] = 1
-class Settings:
+class Settings: # noqa: D101
def __init__(self):
self.process = Process_Settings()
self.scenario = Scenario_Settings()
self.overrides = {}
- def __setitem__(self, key, data):
+ def __setitem__(self, key, data): # noqa: D105
if key in self.process.settings:
self.process.settings[key] = data
elif key in self.scenario.settings:
@@ -262,53 +262,53 @@ def __setitem__(self, key, data):
else:
raise AttributeError(repr(key) + ' is not in the Settings.')
- def __getitem__(self, key):
+ def __getitem__(self, key): # noqa: D105
if key in self.process.settings:
- if self.scenario != None:
+ if self.scenario != None: # noqa: E711
if key in self.scenario.settings:
raise ValueError(
str(key) + ' in both the process and scenario settings.'
)
return self.process.settings[key]
- elif self.scenario != None:
+ elif self.scenario != None: # noqa: RET505, E711
if key in self.scenario.settings:
return self.scenario.settings[key]
raise ValueError(str(key) + ' NOT in either process or scenario settings.')
- def __contains__(self, key):
+ def __contains__(self, key): # noqa: D105
if key in self.process.settings:
return True
- elif self.scenario != None:
+ elif self.scenario != None: # noqa: RET505, E711
if key in self.scenario.settings:
return True
return False
- def importJsonSettings(self, json_file_path):
+ def importJsonSettings(self, json_file_path): # noqa: N802
"""Read a settinsg json file and import the data
Args:
----
json_file_path (path): JSON file path
- """
- with open(json_file_path) as f:
+ """ # noqa: D400
+ with open(json_file_path) as f: # noqa: PTH123
settings_data = json.load(f)
if not isinstance(settings_data, dict):
- raise ValueError(
- 'Wrong JSON file type for the settings. The settings JSOn file must be an OBJECT file type.'
+ raise ValueError( # noqa: TRY003, TRY004
+ 'Wrong JSON file type for the settings. The settings JSON file must be an OBJECT file type.' # noqa: EM101
)
for key, val in settings_data.items():
if key not in self:
- raise ValueError(
- f'REWET settinsg does not have "{key}" as a settings key'
+ raise ValueError( # noqa: TRY003
+ f'REWET settings does not have "{key}" as a settings key' # noqa: EM102
)
- print(key, val)
+ print(key, val) # noqa: T201
if (
key
in [
@@ -326,9 +326,9 @@ def importJsonSettings(self, json_file_path):
self[key] = val
- def importProject(self, project_addr):
- with open(project_addr, 'rb') as f:
- project = pickle.load(f)
+ def importProject(self, project_addr): # noqa: N802, D102
+ with open(project_addr, 'rb') as f: # noqa: PTH123
+ project = pickle.load(f) # noqa: S301
# for k in project.project_settings.scenario.settings:
# new_value = project.project_settings.scenario[k]
# old_value = self.scenario[k]
@@ -336,8 +336,8 @@ def importProject(self, project_addr):
self.process = project.project_settings.process
self.scenario = project.project_settings.scenario
- def initializeScenarioSettings(self, scenario_index):
- if self.process['Parameter_override'] == False:
+ def initializeScenarioSettings(self, scenario_index): # noqa: C901, N802, D102
+ if self.process['Parameter_override'] == False: # noqa: E712
return
list_file = pd.read_excel(self['pipe_damage_file_list'])
@@ -353,8 +353,8 @@ def initializeScenarioSettings(self, scenario_index):
if parameter_name in self:
try:
- if type(override_value) != str and np.isnan(override_value):
- warnings.warn(
+ if type(override_value) != str and np.isnan(override_value): # noqa: E721
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -363,11 +363,11 @@ def initializeScenarioSettings(self, scenario_index):
+ ' is empty. The override is IGNORED!'
)
continue
- except:
+ except: # noqa: S110, E722
pass
if override_value == '':
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -385,7 +385,7 @@ def initializeScenarioSettings(self, scenario_index):
override_key1 = splited_parameter_name[0]
override_key2 = splited_parameter_name[1]
- if number_of_words != 2:
+ if number_of_words != 2: # noqa: PLR2004
raise ValueError(
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
@@ -395,7 +395,7 @@ def initializeScenarioSettings(self, scenario_index):
+ ' is not an acceptable parameter'
)
- if override_key1 == None:
+ if override_key1 == None: # noqa: E711
raise ValueError(
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
@@ -405,7 +405,7 @@ def initializeScenarioSettings(self, scenario_index):
)
if override_key1.upper() not in acceptable_override_list:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -418,8 +418,8 @@ def initializeScenarioSettings(self, scenario_index):
continue
try:
- if type(override_value) != str and np.isnan(override_value):
- warnings.warn(
+ if type(override_value) != str and np.isnan(override_value): # noqa: E721
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -428,11 +428,11 @@ def initializeScenarioSettings(self, scenario_index):
+ ' is empty. The override is IGNORED!'
)
continue
- except:
+ except: # noqa: S110, E722
pass
if override_value == '':
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -443,7 +443,7 @@ def initializeScenarioSettings(self, scenario_index):
continue
if override_key1.upper() == 'POINTS':
- if override_key2 == None:
+ if override_key2 == None: # noqa: E711
raise ValueError(
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
@@ -461,7 +461,7 @@ def initializeScenarioSettings(self, scenario_index):
else:
self.overrides['POINTS'] = {override_key2: point_list}
else:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -480,9 +480,9 @@ def initializeScenarioSettings(self, scenario_index):
# warnings.warn("REWET Input ERROR in scenario: " + repr(scenario_name) + "\n" + "SPEEDCREW is not valid; thus, the override is ignored!")
# =============================================================================
else:
- raise ValueError('Unknown overrise key')
+ raise ValueError('Unknown override key') # noqa: EM101, TRY003
- def getOverridePointsList(self, points_list_str, scenario_name):
+ def getOverridePointsList(self, points_list_str, scenario_name): # noqa: N802, D102
point_list = []
points_list_str = points_list_str.strip()
@@ -490,7 +490,7 @@ def getOverridePointsList(self, points_list_str, scenario_name):
for word in points_list_str:
if ':' not in word:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -503,8 +503,8 @@ def getOverridePointsList(self, points_list_str, scenario_name):
splited_word = word.split(':')
- if len(splited_word) > 2:
- warnings.warn(
+ if len(splited_word) > 2: # noqa: PLR2004
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -520,8 +520,8 @@ def getOverridePointsList(self, points_list_str, scenario_name):
try:
x_coord = float(x_coord)
- except:
- warnings.warn(
+ except: # noqa: E722
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -536,8 +536,8 @@ def getOverridePointsList(self, points_list_str, scenario_name):
try:
y_coord = float(y_coord)
- except:
- warnings.warn(
+ except: # noqa: E722
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -554,11 +554,11 @@ def getOverridePointsList(self, points_list_str, scenario_name):
return point_list
- def getOverrideCrewSpeed(self, crew_speed_str, scenario_name):
+ def getOverrideCrewSpeed(self, crew_speed_str, scenario_name): # noqa: N802, D102
crew_speed_str = crew_speed_str.strip()
if len(crew_speed_str.split()) > 1:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
@@ -569,8 +569,8 @@ def getOverrideCrewSpeed(self, crew_speed_str, scenario_name):
try:
crew_speed = float(crew_speed_str)
- except:
- warnings.warn(
+ except: # noqa: E722
+ warnings.warn( # noqa: B028
'REWET Input ERROR in scenario: '
+ repr(scenario_name)
+ '\n'
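The `Settings.__getitem__`/`__contains__` changes above implement a two-tier lookup: process settings are consulted first, then scenario settings, and a key that lives in both is treated as an error. A condensed sketch of that contract (class and field names here are illustrative):

```python
class TwoTierSettings:
    def __init__(self, process, scenario):
        self.process = process    # process-level settings dict
        self.scenario = scenario  # scenario-level settings dict

    def __getitem__(self, key):
        if key in self.process and key in self.scenario:
            raise ValueError(str(key) + ' in both the process and scenario settings.')
        if key in self.process:
            return self.process[key]
        if key in self.scenario:
            return self.scenario[key]
        raise KeyError(key)

    def __contains__(self, key):
        return key in self.process or key in self.scenario


settings = TwoTierSettings({'trials': 10}, {'pipe_damage_file_list': 'list.xlsx'})
assert settings['trials'] == 10 and 'pipe_damage_file_list' in settings
```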
diff --git a/modules/systemPerformance/REWET/REWET/Main_GUI.py b/modules/systemPerformance/REWET/REWET/Main_GUI.py
index debef25f4..ba6823a4c 100644
--- a/modules/systemPerformance/REWET/REWET/Main_GUI.py
+++ b/modules/systemPerformance/REWET/REWET/Main_GUI.py
@@ -1,7 +1,7 @@
"""Created on Thu Nov 10 21:46:04 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import os
import sys
@@ -10,5 +10,5 @@
from GUI.Opening_Designer import Opening_Designer
opening_designer = Opening_Designer()
- print(os.getpid())
+ print(os.getpid()) # noqa: T201
sys.exit(opening_designer.run())
diff --git a/modules/systemPerformance/REWET/REWET/Output/Crew_Report.py b/modules/systemPerformance/REWET/REWET/Output/Crew_Report.py
index ef19a2d03..ade0ecae5 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Crew_Report.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Crew_Report.py
@@ -1,25 +1,25 @@
"""Created on Thu Oct 27 15:45:10 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import pandas as pd
-class Crew_Report:
+class Crew_Report: # noqa: D101
def __init__(self):
pass
- def getCrewForTime(self, scn_name, time):
+ def getCrewForTime(self, scn_name, time): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
- crew_table = reg.restoration_log_book._agent_state_log_book
+ crew_table = reg.restoration_log_book._agent_state_log_book # noqa: SLF001
crew_table = crew_table.set_index('Time')
crew_table = crew_table.loc[time]
- return crew_table
+ return crew_table # noqa: RET504
- def getCrewTableAt(self, scn_name, time, crew_type_name, crew_zone=None):
+ def getCrewTableAt(self, scn_name, time, crew_type_name, crew_zone=None): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
# crew_type = self.getCrewForTime(scn_name, time)
@@ -27,25 +27,25 @@ def getCrewTableAt(self, scn_name, time, crew_type_name, crew_zone=None):
typed_crew_table = crew_table[crew_table['type'] == crew_type_name]
if crew_zone is not None:
- if type(crew_zone) == str:
+ if type(crew_zone) == str: # noqa: E721
typed_crew_table = typed_crew_table[
typed_crew_table['group'] == crew_zone
]
- elif type(crew_zone) == list:
+ elif type(crew_zone) == list: # noqa: E721
i = 0
for crew_zone_value in crew_zone:
if i == 0:
res = typed_crew_table['group'] == crew_zone_value
else:
res = (typed_crew_table['group'] == crew_zone_value) | res
- i += 1
+ i += 1 # noqa: SIM113
typed_crew_table = typed_crew_table[res]
else:
raise ValueError('Unknown crew_zone type: ' + repr(type(crew_zone)))
return typed_crew_table
- def getCrewAvailabilityThroughTime(
+ def getCrewAvailabilityThroughTime( # noqa: N802, D102
self,
scn_name,
crew_type_name,
@@ -65,19 +65,19 @@ def getCrewAvailabilityThroughTime(
)
total_number = len(crew_table_time)
available_number_time = crew_table_time[
- (crew_table_time['available'] == True)
- | (crew_table_time['active'] == True)
+ (crew_table_time['available'] == True) # noqa: E712
+ | (crew_table_time['active'] == True) # noqa: E712
]
crew_number.loc[time] = len(available_number_time)
return total_number, crew_number
- def getCrewOnShiftThroughTime(
+ def getCrewOnShiftThroughTime( # noqa: N802, D102
self,
scn_name,
crew_type_name,
crew_zone=None,
- not_on_shift=False,
+ not_on_shift=False, # noqa: FBT002
):
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
@@ -93,13 +93,13 @@ def getCrewOnShiftThroughTime(
)
total_number = len(crew_table_time)
- if not_on_shift == False:
+ if not_on_shift == False: # noqa: E712
available_number_time = crew_table_time[
- crew_table_time['active'] == True
+ crew_table_time['active'] == True # noqa: E712
]
- elif not_on_shift == True:
+ elif not_on_shift == True: # noqa: E712
available_number_time = crew_table_time[
- crew_table_time['active'] == False
+ crew_table_time['active'] == False # noqa: E712
]
else:
raise ValueError('Unknown not_on_shift: ' + repr(not_on_shift))
@@ -107,12 +107,12 @@ def getCrewOnShiftThroughTime(
return total_number, crew_number
- def getCrewWorkingThroughTime(
+ def getCrewWorkingThroughTime( # noqa: N802, D102
self,
scn_name,
crew_type_name,
crew_zone=None,
- not_on_working=False,
+ not_on_working=False, # noqa: FBT002
):
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
@@ -129,15 +129,15 @@ def getCrewWorkingThroughTime(
total_number = len(crew_table_time)
# available_number_time = crew_table_time[crew_table_time['available']==True]
available_number_time = crew_table_time[
- crew_table_time['active'] == True
+ crew_table_time['active'] == True # noqa: E712
]
- if not_on_working == False:
+ if not_on_working == False: # noqa: E712
available_number_time = available_number_time[
- available_number_time['ready'] == False
+ available_number_time['ready'] == False # noqa: E712
]
- elif not_on_working == True:
+ elif not_on_working == True: # noqa: E712
available_number_time = available_number_time[
- available_number_time['ready'] == True
+ available_number_time['ready'] == True # noqa: E712
]
else:
raise ValueError('Unknown not_on_working: ' + repr(not_on_working))
@@ -145,7 +145,7 @@ def getCrewWorkingThroughTime(
return total_number, crew_number
- def getCrewCompleteStatusReport(self, scn_name, crew_type_name, crew_zone=None):
+ def getCrewCompleteStatusReport(self, scn_name, crew_type_name, crew_zone=None): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
crew_table = reg.restoration_log_book.crew_history
@@ -171,7 +171,7 @@ def getCrewCompleteStatusReport(self, scn_name, crew_type_name, crew_zone=None):
scn_name, time, crew_type_name, crew_zone
)
- for agent_index, agent_row in crew_table_time.iterrows():
+ for agent_index, agent_row in crew_table_time.iterrows(): # noqa: B007
if agent_row['data'].isOnShift(time):
crew_report.loc[time, 'on-duty'] += 1
else:
@@ -182,14 +182,14 @@ def getCrewCompleteStatusReport(self, scn_name, crew_type_name, crew_zone=None):
crew_report.loc[time, 'Reported'] += 1
if agent_row['active'] and agent_row['ready']:
crew_report.loc[time, 'idle'] += 1
- elif agent_row['active'] and agent_row['ready'] == False:
+ elif agent_row['active'] and agent_row['ready'] == False: # noqa: E712
crew_report.loc[time, 'busy'] += 1
else:
crew_report.loc[time, 'Total_not-reported'] += 1
if agent_row['data'].isOnShift(time):
crew_report.loc[time, 'Not-reported'] += 1
- if agent_row['active'] == True:
- print('time=' + str(time))
- print(agent_row)
+ if agent_row['active'] == True: # noqa: E712
+ print('time=' + str(time)) # noqa: T201
+ print(agent_row) # noqa: T201
return crew_report
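Most of the `# noqa: E712` markers in Crew_Report.py guard pandas masks written as `column == True` / `column == False`. With genuinely boolean columns, the Series itself (or its negation) is the mask, which is the form E712 points at; a small sketch:

```python
import pandas as pd

crew = pd.DataFrame({
    'active': [True, False, True],
    'ready': [False, False, True],
})

busy = crew[crew['active'] & ~crew['ready']]  # instead of == True / == False
print(len(busy))  # -> 1
```

Keeping `== True` can matter when a column mixes booleans with NaN, which may be why the code suppresses the rule rather than rewriting the masks.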
diff --git a/modules/systemPerformance/REWET/REWET/Output/Curve.py b/modules/systemPerformance/REWET/REWET/Output/Curve.py
index 24f14aa85..ef7d06e3b 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Curve.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Curve.py
@@ -1,24 +1,24 @@
"""Created on Tue Oct 25 14:30:01 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import pandas as pd
from .Helper import hhelper
-class Curve:
+class Curve: # noqa: D101
def __init__():
pass
- def getPipeStatusByAction(self, scn_name, action):
+ def getPipeStatusByAction(self, scn_name, action): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
sequence = reg.retoration_data['sequence']['PIPE']
if action not in sequence:
raise ValueError('the action is not in the sequence: ' + str(action))
- pipe_damage_table_time_series = reg._pipe_damage_table_time_series
+ pipe_damage_table_time_series = reg._pipe_damage_table_time_series # noqa: SLF001
time_action_done = {}
for time in pipe_damage_table_time_series:
current_pipe_damage_table = pipe_damage_table_time_series[time]
@@ -30,7 +30,7 @@ def getPipeStatusByAction(self, scn_name, action):
~current_action_damage.isna()
]
current_action_damage_true = current_action_damage[
- current_action_damage == True
+ current_action_damage == True # noqa: E712
]
unique_done_orginal_element_list = (
(
@@ -53,13 +53,13 @@ def getPipeStatusByAction(self, scn_name, action):
return pd.Series(time_action_done)
- def getNodeStatusByAction(self, scn_name, action):
+ def getNodeStatusByAction(self, scn_name, action): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
sequence = reg.retoration_data['sequence']['DISTNODE']
if action not in sequence:
raise ValueError('the action is not in the sequence: ' + str(action))
- node_damage_table_time_series = reg._node_damage_table_time_series
+ node_damage_table_time_series = reg._node_damage_table_time_series # noqa: SLF001
time_action_done = {}
for time in node_damage_table_time_series:
current_node_damage_table = node_damage_table_time_series[time]
@@ -71,7 +71,7 @@ def getNodeStatusByAction(self, scn_name, action):
~current_action_damage.isna()
]
current_action_damage_true = current_action_damage[
- current_action_damage == True
+ current_action_damage == True # noqa: E712
]
unique_done_orginal_element_list = (
(
@@ -94,7 +94,7 @@ def getNodeStatusByAction(self, scn_name, action):
return pd.Series(time_action_done)
- def getPumpStatus(self, scn_name):
+ def getPumpStatus(self, scn_name): # noqa: N802, D102
self.loadScneariodata(scn_name)
res = self.data[scn_name]
reg = self.registry[scn_name]
@@ -109,7 +109,7 @@ def getPumpStatus(self, scn_name):
return pd.Series(time_action_done)
- def getTankStatus(self, scn_name):
+ def getTankStatus(self, scn_name): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
time_list = reg.time_list
@@ -123,7 +123,7 @@ def getTankStatus(self, scn_name):
return pd.Series(time_action_done)
- def getInputWaterFlowCurve(
+ def getInputWaterFlowCurve( # noqa: C901, N802, D102
self,
scn_name,
tank_name_list=None,
@@ -133,7 +133,7 @@ def getInputWaterFlowCurve(
self.loadScneariodata(scn_name)
res = self.data[scn_name]
- if tank_name_list == None:
+ if tank_name_list == None: # noqa: E711
tank_name_list = self.wn.tank_name_list
not_known_tank = set(tank_name_list) - set(self.wn.tank_name_list)
@@ -143,7 +143,7 @@ def getInputWaterFlowCurve(
+ repr(tank_name_list)
)
- if reservoir_name_list == None:
+ if reservoir_name_list == None: # noqa: E711
reservoir_name_list = self.wn.reservoir_name_list
not_known_reservoir = set(reservoir_name_list) - set(
@@ -160,7 +160,7 @@ def getInputWaterFlowCurve(
# inbound_flow = 0
# outbound_flow = 0
- waterFlow = None
+ waterFlow = None # noqa: N806
for tank_name in tank_name_list:
if tank_name in res.node['demand'].columns:
@@ -175,11 +175,11 @@ def getInputWaterFlowCurve(
inbound_flow.loc[time] += -1 * flow
if mode == 'all':
- waterFlow = outbound_flow + inbound_flow
+ waterFlow = outbound_flow + inbound_flow # noqa: N806
elif mode == 'out':
- waterFlow = outbound_flow
+ waterFlow = outbound_flow # noqa: N806
elif mode == 'in':
- waterFlow = inbound_flow
+ waterFlow = inbound_flow # noqa: N806
else:
raise ValueError('Unknown mode: ' + repr(mode))
@@ -196,19 +196,19 @@ def getInputWaterFlowCurve(
inbound_flow.loc[time] += -1 * flow
if mode == 'all':
- waterFlow = outbound_flow + inbound_flow
+ waterFlow = outbound_flow + inbound_flow # noqa: N806
elif mode == 'out':
- waterFlow = outbound_flow
+ waterFlow = outbound_flow # noqa: N806
elif mode == 'in':
- waterFlow = inbound_flow
+ waterFlow = inbound_flow # noqa: N806
else:
raise ValueError('Unknown mode: ' + repr(mode))
return waterFlow
- def getOveralDemandSatisfied(self, scn_name, pure=False):
+ def getOveralDemandSatisfied(self, scn_name, pure=False): # noqa: FBT002, N802, D102
self.loadScneariodata(scn_name)
- if pure == False:
+ if pure == False: # noqa: E712
demand_node_name_list = self.demand_node_name_list
else:
demand_node_name_list = []
@@ -225,27 +225,27 @@ def getOveralDemandSatisfied(self, scn_name, pure=False):
# sat_node_demands = sat_node_demands.applymap(hhelper)
s = sat_node_demands.sum(axis=1)
- return s
+ return s # noqa: RET504
- def getWaterLeakingFromNode(self, scn_name):
+ def getWaterLeakingFromNode(self, scn_name): # noqa: N802, D102
self.loadScneariodata(scn_name)
res = self.data[scn_name]
sum_amount = 0
try:
res = res.node['leak']
sum_amount = res.sum(axis=1)
- except:
+ except: # noqa: E722
sum_amount = 0
return sum_amount
- def getWaterLeakingFromPipe(self, scn_name, mode='all'):
+ def getWaterLeakingFromPipe(self, scn_name, mode='all'): # noqa: N802, D102
self.loadScneariodata(scn_name)
reg = self.registry[scn_name]
res = self.data[scn_name]
- damage_location_list = reg._pipe_damage_table
+ damage_location_list = reg._pipe_damage_table # noqa: SLF001
- if mode == 'leak' or mode == 'break':
+ if mode == 'leak' or mode == 'break': # noqa: PLR1714
damage_location_list = damage_location_list[
damage_location_list['damage_type'] == mode
]
@@ -257,7 +257,7 @@ def getWaterLeakingFromPipe(self, scn_name, mode='all'):
break_damage_table = damage_location_list[
damage_location_list['damage_type'] == 'break'
]
- pipe_B_list = self.registry[scn_name]._pipe_break_history.loc[
+ pipe_B_list = self.registry[scn_name]._pipe_break_history.loc[ # noqa: SLF001, N806
break_damage_table.index, 'Node_B'
]
@@ -273,13 +273,13 @@ def getWaterLeakingFromPipe(self, scn_name, mode='all'):
leak_from_pipe = res.node['demand'][available_nodes]
- leak = leak_from_pipe < -0.1
+ leak = leak_from_pipe < -0.1 # noqa: PLR2004
if leak.any().any():
- raise ValueError('There is negative leak')
+ raise ValueError('There is a negative leak') # noqa: EM101, TRY003
return leak_from_pipe.sum(axis=1)
- def getSystemServiceabilityIndexCurve(self, scn_name, iPopulation='No'):
+ def getSystemServiceabilityIndexCurve(self, scn_name, iPopulation='No'): # noqa: N802, N803, D102
s4 = self.getRequiredDemandForAllNodesandtime(scn_name)
sat_node_demands = (
self.data[scn_name].node['demand'].filter(self.demand_node_name_list)
@@ -298,20 +298,20 @@ def getSystemServiceabilityIndexCurve(self, scn_name, iPopulation='No'):
for time_index, val in s.iteritems():
if val < 0:
- val = 0
+ val = 0 # noqa: PLW2901
elif val > 1:
- val = 1
+ val = 1 # noqa: PLW2901
s.loc[time_index] = val
return s
- def getBSCIndexPopulation_4(
+ def getBSCIndexPopulation_4( # noqa: N802, D102
self,
scn_name,
bsc='DL',
- iPopulation=False,
- ratio=False,
- consider_leak=False,
+ iPopulation=False, # noqa: FBT002, N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=1,
):
if bsc == 'DL':
@@ -322,7 +322,7 @@ def getBSCIndexPopulation_4(
consider_leak=consider_leak,
leak_ratio=leak_ratio,
)
- elif bsc == 'QN':
+ elif bsc == 'QN': # noqa: RET505
return self.getQNIndexPopulation_4(
scn_name,
iPopulation=iPopulation,
@@ -331,26 +331,26 @@ def getBSCIndexPopulation_4(
leak_ratio=leak_ratio,
)
else:
- raise ValueError(f'BSC input is not recognizable: {bsc}')
+ raise ValueError(f'BSC input is not recognizable: {bsc}') # noqa: EM102, TRY003
- def getDLIndexPopulation_4(
+ def getDLIndexPopulation_4( # noqa: C901, N802, D102
self,
scn_name,
- iPopulation='No',
- ratio=False,
- consider_leak=False,
+ iPopulation='No', # noqa: N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=1,
):
- if type(leak_ratio) != float:
+ if type(leak_ratio) != float: # noqa: E721
leak_ratio = float(leak_ratio)
self.loadScneariodata(scn_name)
res = self.data[scn_name]
- if type(self._population_data) == type(None) or iPopulation == False:
+ if type(self._population_data) == type(None) or iPopulation == False: # noqa: E712, E721
pop = pd.Series(index=self.demand_node_name_list, data=1)
- elif type(self._population_data) == type(None) and iPopulation == True:
- raise ValueError('Population data is not available')
+ elif type(self._population_data) == type(None) and iPopulation == True: # noqa: E712, E721
+ raise ValueError('Population data is not available') # noqa: EM101, TRY003
else:
pop = self._population_data
@@ -388,38 +388,38 @@ def getDLIndexPopulation_4(
if name in leak_data.columns:
leak_data_name = leak_data[name]
for time in leak_data_name.index:
- if leak_data_name.loc[time] == True:
+ if leak_data_name.loc[time] == True: # noqa: E712
s.loc[time, name] = False
s = s * pop[s.columns]
- if ratio == False:
+ if ratio == False: # noqa: E712
total_pop = 1
else:
total_pop = pop.sum()
result = s.sum(axis=1) / total_pop
- return result
+ return result # noqa: RET504
- def getQNIndexPopulation_4(
+ def getQNIndexPopulation_4( # noqa: C901, N802, D102
self,
scn_name,
- iPopulation=False,
- ratio=False,
- consider_leak=False,
+ iPopulation=False, # noqa: FBT002, N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=0.75,
):
- if type(leak_ratio) != float:
+ if type(leak_ratio) != float: # noqa: E721
leak_ratio = float(leak_ratio)
self.loadScneariodata(scn_name)
res = self.data[scn_name]
- if type(self._population_data) == type(None) or iPopulation == False:
+ if type(self._population_data) == type(None) or iPopulation == False: # noqa: E712, E721
pop = pd.Series(index=self.demand_node_name_list, data=1)
- elif type(self._population_data) == type(None) and iPopulation == True:
- raise ValueError('Population data is not available')
+ elif type(self._population_data) == type(None) and iPopulation == True: # noqa: E712, E721
+ raise ValueError('Population data is not available') # noqa: EM101, TRY003
else:
pop = self._population_data
@@ -454,27 +454,27 @@ def getQNIndexPopulation_4(
if name in leak_data.columns:
leak_data_name = leak_data[name]
for time in leak_data_name.index:
- if leak_data_name.loc[time] == True:
+ if leak_data_name.loc[time] == True: # noqa: E712
s.loc[time, name] = False
s = s * pop[s.columns]
- if ratio == False:
+ if ratio == False: # noqa: E712
total_pop = 1
else:
total_pop = pop.sum()
result = s.sum(axis=1) / total_pop
- return result
+ return result # noqa: RET504
- def getQuantityExceedanceCurve(
+ def getQuantityExceedanceCurve( # noqa: N802, D102
self,
- iPopulation='No',
- ratio=False,
- consider_leak=False,
+ iPopulation='No', # noqa: ARG002, N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=0.75,
result_type='mean',
- daily=False,
+ daily=False, # noqa: FBT002
min_time=0,
max_time=999999999999999,
):
@@ -509,14 +509,14 @@ def getQuantityExceedanceCurve(
return res
- def getDeliveryExceedanceCurve(
+ def getDeliveryExceedanceCurve( # noqa: N802, D102
self,
- iPopulation='No',
- ratio=False,
- consider_leak=False,
+ iPopulation='No', # noqa: N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=0.75,
result_type='mean',
- daily=False,
+ daily=False, # noqa: FBT002
min_time=0,
max_time=999999999999999,
):
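`getSystemServiceabilityIndexCurve` above clamps each serviceability value to [0, 1] one element at a time via `iteritems()`, which newer pandas releases deprecate. `Series.clip` expresses the same clamp in a single vectorized call:

```python
import pandas as pd

s = pd.Series([-0.2, 0.4, 1.3])
s = s.clip(lower=0.0, upper=1.0)  # replaces the per-element loop
print(s.tolist())  # -> [0.0, 0.4, 1.0]
```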
diff --git a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py
index aa548bed6..549c86b0c 100644
--- a/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py
+++ b/modules/systemPerformance/REWET/REWET/Output/GUI_Curve_API.py
@@ -1,15 +1,15 @@
"""Created on Thu Nov 10 19:12:46 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import pickle
-def getDummyDataForQNExeedanceCurve():
- with open('qn_data.pkl', 'rb') as f:
- dummy_data = pickle.load(f)
- return dummy_data
+def getDummyDataForQNExeedanceCurve(): # noqa: N802, D103
+ with open('qn_data.pkl', 'rb') as f: # noqa: PTH123
+ dummy_data = pickle.load(f) # noqa: S301
+ return dummy_data # noqa: RET504
"""
@@ -24,7 +24,7 @@ def getDummyDataForQNExeedanceCurve():
"""
-def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0):
+def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802
"""Gets Project Result object, and returns Exceedance probability and Quantity
outage for the given percentages. Caution: the current version only accepts
one percentage in the percentage list.
@@ -42,21 +42,21 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0):
-------
None.
- """
+ """ # noqa: D205, D401
data = getDummyDataForQNExeedanceCurve()
if len(percentage_list) > 1:
- raise ValueError(
- 'the current version only accept one percentage in the percentage list'
+ raise ValueError( # noqa: TRY003
+ 'the current version only accepts one percentage in the percentage list' # noqa: EM101
)
- if type(time_shift) != int:
+ if type(time_shift) != int: # noqa: E721
raise ValueError(
'Time shift must be integer type: ' + repr(type(time_shift)) + '.'
)
if time_shift < 0:
- raise ValueError('Time shift ust be bigger than or equal to zero.')
+ raise ValueError('Time shift must be greater than or equal to zero.') # noqa: EM101, TRY003
res = {}
for percentage in percentage_list:
@@ -75,7 +75,7 @@ def QNExceedanceCurve(pr, percentage_list, time_type, time_shift=0):
return res
-def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0):
+def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0): # noqa: N802
"""Gets Project Result object, and returns Exceedance probability and Delivery
outage for the given percentages. Caution: the current version only accepts
one percentage in the percentage list.
@@ -93,21 +93,21 @@ def DLExceedanceCurve(pr, percentage_list, time_type, time_shift=0):
-------
None.
- """
+ """ # noqa: D205, D401
data = getDummyDataForQNExeedanceCurve()
if len(percentage_list) > 1:
- raise ValueError(
- 'the current version only accept one percentage in the percentage list'
+ raise ValueError( # noqa: TRY003
+ 'the current version only accepts one percentage in the percentage list' # noqa: EM101
)
- if type(time_shift) != int:
+ if type(time_shift) != int: # noqa: E721
raise ValueError(
'Time shift must be integer type: ' + repr(type(time_shift)) + '.'
)
if time_shift < 0:
- raise ValueError('Time shift ust be bigger than or equal to zero.')
+ raise ValueError('Time shift must be greater than or equal to zero.') # noqa: EM101, TRY003
res = {}
for percentage in percentage_list:
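The TRY003/EM101 suppressions in this file keep long literal messages inside the `raise` statements. The pattern those rules push toward is binding the message to a name first (or defining a dedicated exception class); a sketch against the same validation logic, with the small liberty of raising TypeError for the type check where the original raises ValueError in both cases:

```python
def check_time_shift(time_shift):
    if not isinstance(time_shift, int):
        msg = 'Time shift must be integer type: ' + repr(type(time_shift)) + '.'
        raise TypeError(msg)
    if time_shift < 0:
        msg = 'Time shift must be greater than or equal to zero.'
        raise ValueError(msg)


check_time_shift(0)  # passes silently
```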
diff --git a/modules/systemPerformance/REWET/REWET/Output/Helper.py b/modules/systemPerformance/REWET/REWET/Output/Helper.py
index d7b1671b2..137949784 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Helper.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Helper.py
@@ -1,7 +1,7 @@
"""Created on Mon Oct 24 18:10:31 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
# import numba
import operator
@@ -10,16 +10,16 @@
import numpy as np
-def hhelper(x):
+def hhelper(x): # noqa: D103
if x < 0:
return 0
- else:
+ else: # noqa: RET505
return x
# @numba.jit()
-def EPHelper(prob_mat, old):
- if old == False: # prob_mat = prob_mat.tolist()
+def EPHelper(prob_mat, old): # noqa: N802, D103
+ if old == False: # prob_mat = prob_mat.tolist() # noqa: E712
# one_minus_p_list = 1-prob_mat
one_minus_p_list = [1 - p for p in prob_mat]
pi_one_minus_p_list = [
@@ -27,11 +27,11 @@ def EPHelper(prob_mat, old):
for i in range(len(one_minus_p_list))
]
# pi_one_minus_p_list = [rr.apply(lambda x: [x[i] * x[1], raw=True)
- return pi_one_minus_p_list
+ return pi_one_minus_p_list # noqa: RET504
# pi_one_minus_p_list.iloc[0] = one_minus_p_list.iloc[0]
# return (pd.Series(1.00, index=pi_one_minus_p_list.index) - pi_one_minus_p_list, prob_mat)
- else:
+ else: # noqa: RET505
ep_mat = np.ndarray(prob_mat.size)
for i in np.arange(prob_mat.size):
j = 0
@@ -45,12 +45,12 @@ def EPHelper(prob_mat, old):
return ep_mat
-def helper_outageMap(pandas_list):
+def helper_outageMap(pandas_list): # noqa: N802, D103
false_found_flag = False
b_list = pandas_list.tolist()
i = 0
for b_value in b_list:
- if b_value == False:
+ if b_value == False: # noqa: E712
false_found_flag = True
break
i += 1
@@ -58,8 +58,8 @@ def helper_outageMap(pandas_list):
return false_found_flag, i
-def hhelper(x):
+def hhelper(x): # noqa: D103, F811
if x < 0:
return 0
- else:
+ else: # noqa: RET505
return x
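`EPHelper`'s list comprehension accumulates running products of `(1 - p)` over the probability vector, and `hhelper` is a clamp at zero. Assuming that reading is right, NumPy collapses both to one-liners:

```python
import numpy as np


def ep_helper(prob_mat):
    """Running product of (1 - p), as in the non-'old' EPHelper branch."""
    return np.cumprod(1.0 - np.asarray(prob_mat))


def hhelper(x):
    return max(x, 0)


print(ep_helper([0.1, 0.2, 0.5]))  # -> [0.9  0.72 0.36]
print(hhelper(-3.5))               # -> 0
```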
diff --git a/modules/systemPerformance/REWET/REWET/Output/Map.py b/modules/systemPerformance/REWET/REWET/Output/Map.py
index 2385900e4..c35ba5c8e 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Map.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Map.py
@@ -13,7 +13,7 @@
}
@author: snaeimi
-"""
+""" # noqa: INP001, D205
import warnings
@@ -26,25 +26,25 @@
# import time
-class Map:
+class Map: # noqa: D101
def __init__(self):
pass
# def loadShapeFile(shapeFileAddr='Northridge\GIS\Demand\demand_polygons.shp'):
- def loadShapeFile(
+ def loadShapeFile( # noqa: N802, D102
self,
- shapeFileAddr=r'Northridge\GIS\Demand\demand_polygons.shp',
+ shapeFileAddr=r'Northridge\GIS\Demand\demand_polygons.shp', # noqa: N803
):
shape_file = gpd.read_file(shapeFileAddr)
- return shape_file
+ return shape_file # noqa: RET504
- def joinTwoShapeFiles(self, first, second):
+ def joinTwoShapeFiles(self, first, second): # noqa: N802, D102
second = second.set_crs(crs=first.crs)
joined_map = gpd.sjoin(first, second)
- return joined_map
+ return joined_map # noqa: RET504
- def createGeopandasPointDataFrameForNodes(self):
+ def createGeopandasPointDataFrameForNodes(self): # noqa: N802, D102
s = gpd.GeoDataFrame(index=self.demand_node_name_list)
point_list = []
point_name_list = []
@@ -56,7 +56,7 @@ def createGeopandasPointDataFrameForNodes(self):
s.geometry = point_list
return s
- def getDLQNExceedenceProbabilityMap(self, data_frame, ihour, param):
+ def getDLQNExceedenceProbabilityMap(self, data_frame, ihour, param): # noqa: N802, D102
data = data_frame.transpose()
scn_prob_list = self.scenario_prob
# DLQN_dmg = pd.DataFrame(data=0, index=data.index, columns=data.columns)
@@ -102,7 +102,7 @@ def getDLQNExceedenceProbabilityMap(self, data_frame, ihour, param):
)
inter_series = inter_series.interpolate(method='linear')
inter_value = inter_series.loc[inter_ind]
- if type(inter_value) != np.float64:
+ if type(inter_value) != np.float64: # noqa: E721
inter_value = inter_value.mean()
res_dict_list.append({'node_name': node_name, 'res': inter_value})
@@ -136,7 +136,7 @@ def getDLQNExceedenceProbabilityMap(self, data_frame, ihour, param):
)
inter_series = inter_series.interpolate(method='linear')
inter_value = inter_series.loc[inter_ind]
- if type(inter_value) != np.float64:
+ if type(inter_value) != np.float64: # noqa: E721
inter_value = inter_value.mean()
res_dict_list.append({'node_name': node_name, 'res': inter_value})
@@ -154,14 +154,14 @@ def getDLQNExceedenceProbabilityMap(self, data_frame, ihour, param):
# ax.get_legend().set_title('Hours without service')
# ax.get_legend()._loc=3
# props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
- print(tt)
+ print(tt) # noqa: T201
return s
- def getOutageTimeGeoPandas_4(
+ def getOutageTimeGeoPandas_4( # noqa: C901, N802, D102
self,
scn_name,
- LOS='DL',
- iConsider_leak=False,
+ LOS='DL', # noqa: N803
+ iConsider_leak=False, # noqa: FBT002, N803
leak_ratio=0,
consistency_time_window=7200,
):
@@ -197,9 +197,9 @@ def getOutageTimeGeoPandas_4(
demands = demands[self.demand_node_name_list]
if LOS == 'DL':
- DL_res_not_met_bool = refined_res <= demands * 0.01
+ DL_res_not_met_bool = refined_res <= demands * 0.01 # noqa: N806
elif LOS == 'QN':
- DL_res_not_met_bool = refined_res < demands * 0.98
+ DL_res_not_met_bool = refined_res < demands * 0.98 # noqa: N806
time_window = consistency_time_window + 1
time_list = DL_res_not_met_bool.index.to_list()
@@ -209,39 +209,39 @@ def getOutageTimeGeoPandas_4(
past_time_beg = time - time_window
window_data = DL_res_not_met_bool.loc[past_time_beg:time]
window_data = window_data.all()
- window_data_false = window_data[window_data == False]
+ window_data_false = window_data[window_data == False] # noqa: E712
DL_res_not_met_bool.loc[time, window_data_false.index] = False
for name in DL_res_not_met_bool:
if name in leak_data.columns:
leak_data_name = leak_data[name]
for time in leak_data_name.index:
- if leak_data_name.loc[time] == True:
+ if leak_data_name.loc[time] == True: # noqa: E712
DL_res_not_met_bool.loc[time, name] = True
all_node_name_list = refined_res.columns
only_not_met_bool = DL_res_not_met_bool.any(0)
only_not_met_any = all_node_name_list[only_not_met_bool]
- DL_res_not_met = DL_res_not_met_bool.filter(only_not_met_any)
- DL_res_MET = ~DL_res_not_met
+ DL_res_not_met = DL_res_not_met_bool.filter(only_not_met_any) # noqa: N806
+ DL_res_MET = ~DL_res_not_met # noqa: N806
time_window = 2
for name in only_not_met_any:
- rolled_DL_res_MET = (
+ rolled_DL_res_MET = ( # noqa: N806
DL_res_MET[name].rolling(time_window, center=True).sum()
)
- rolled_DL_res_MET = rolled_DL_res_MET.sort_index(ascending=False)
- rolled_DL_res_MET.dropna(inplace=True)
+ rolled_DL_res_MET = rolled_DL_res_MET.sort_index(ascending=False) # noqa: N806
+ rolled_DL_res_MET.dropna(inplace=True) # noqa: PD002
false_found, found_index = Helper.helper_outageMap(
rolled_DL_res_MET.ge(time_window - 1)
)
# if name == "SM323":
# return DL_res_MET[name], rolled_DL_res_MET, false_found, rolled_DL_res_MET.index[found_index], rolled_DL_res_MET.ge(time_window-1), found_index
- if false_found == False:
+ if false_found == False: # noqa: E712
latest_time = 0
else:
- if DL_res_MET[name].iloc[-1] == False:
+ if DL_res_MET[name].iloc[-1] == False: # noqa: E712
latest_time = DL_res_MET.index[-1]
else:
latest_time = rolled_DL_res_MET.index[found_index]
@@ -257,14 +257,14 @@ def getOutageTimeGeoPandas_4(
return geopandas_df
- def getOutageTimeGeoPandas_5(
+ def getOutageTimeGeoPandas_5( # noqa: C901, N802, D102
self,
scn_name,
bsc='DL',
- iConsider_leak=False,
+ iConsider_leak=False, # noqa: FBT002, N803
leak_ratio=0,
consistency_time_window=7200,
- sum_time=False,
+ sum_time=False, # noqa: FBT002
):
self.loadScneariodata(scn_name)
res = self.data[scn_name]
@@ -287,9 +287,9 @@ def getOutageTimeGeoPandas_5(
delivered_demand = delivered_demand[common_nodes_demand]
required_demand = required_demand[common_nodes_demand]
- required_demand.sort_index(inplace=True)
- delivered_demand.sort_index(inplace=True)
- leak_res.sort_index(inplace=True)
+ required_demand.sort_index(inplace=True) # noqa: PD002
+ delivered_demand.sort_index(inplace=True) # noqa: PD002
+ leak_res.sort_index(inplace=True) # noqa: PD002
# return delivered_demand, required_demand, leak_res
@@ -323,7 +323,7 @@ def getOutageTimeGeoPandas_5(
)
# leak_res.loc[leak_res_non_available_time_list, : ] = temp_data
leak_res = leak_res.append(temp_data)
- leak_res.sort_index(inplace=True)
+ leak_res.sort_index(inplace=True) # noqa: PD002
leak_criteria_exceeded = (
leak_res.fillna(0) >= leak_ratio * required_demand[leak_res.columns]
)
@@ -358,12 +358,12 @@ def getOutageTimeGeoPandas_5(
)
else:
# print(step_time_beg)
- window_bsc_not_met.drop(step_time_beg, inplace=True)
+ window_bsc_not_met.drop(step_time_beg, inplace=True) # noqa: PD002
else:
window_bsc_not_met = bsc_res_not_met_bool
pre_incident = (window_bsc_not_met.loc[: 3600 * 3]).any()
- non_incident = pre_incident[pre_incident == False].index
+ non_incident = pre_incident[pre_incident == False].index # noqa: E712
not_met_node_name_list = window_bsc_not_met.any()
@@ -371,7 +371,7 @@ def getOutageTimeGeoPandas_5(
# print(not_met_node_name_list[not_met_node_name_list==True])
not_met_node_name_list = not_met_node_name_list[
- not_met_node_name_list == True
+ not_met_node_name_list == True # noqa: E712
]
not_met_node_name_list = not_met_node_name_list.index
@@ -384,7 +384,7 @@ def getOutageTimeGeoPandas_5(
).transpose()
timed_diference_window_bsc_not_met.iloc[0] = 0
sum_window_bsc_not_met = timed_diference_window_bsc_not_met.sum()
- return sum_window_bsc_not_met
+ return sum_window_bsc_not_met # noqa: RET504
window_bsc_not_met = window_bsc_not_met[not_met_node_name_list]
cut_time = window_bsc_not_met.index.max()
@@ -392,11 +392,11 @@ def getOutageTimeGeoPandas_5(
set(non_incident).intersection(set(not_met_node_name_list))
)
for step_time, row in window_bsc_not_met[non_incident].iterrows():
- if step_time <= 14400:
+ if step_time <= 14400: # noqa: PLR2004
continue
- if row.any() == False:
- print(step_time)
+ if row.any() == False: # noqa: E712
+ print(step_time) # noqa: T201
cut_time = step_time
break
@@ -415,7 +415,7 @@ def getOutageTimeGeoPandas_5(
)
number_of_unreported_demand_nodes = len(never_reported_nodes)
if number_of_unreported_demand_nodes > 0:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET WARNING: there are '
+ str(number_of_unreported_demand_nodes)
+ 'unreported nodes'
@@ -423,7 +423,7 @@ def getOutageTimeGeoPandas_5(
map_res.loc[never_reported_nodes] = end_time
map_res = map_res / (3600 * 24)
- return map_res
+ return map_res # noqa: RET504
s = gpd.GeoDataFrame(index=self.demand_node_name_list)
point_list = []
@@ -444,13 +444,13 @@ def getOutageTimeGeoPandas_5(
# return joined_map
# joined_map.loc[map_res.index.to_list(), 'restoration_time'] = (map_res/3600/24).to_list()
- return joined_map
+ return joined_map # noqa: RET504
- def percentOfEffectNodes(
+ def percentOfEffectNodes( # noqa: C901, N802, D102
self,
scn_name,
bsc='QN',
- iConsider_leak=True,
+ iConsider_leak=True, # noqa: FBT002, N803
leak_ratio=0.75,
consistency_time_window=7200,
):
@@ -473,9 +473,9 @@ def percentOfEffectNodes(
delivered_demand = delivered_demand[common_nodes_demand]
required_demand = required_demand[common_nodes_demand]
- required_demand.sort_index(inplace=True)
- delivered_demand.sort_index(inplace=True)
- leak_res.sort_index(inplace=True)
+ required_demand.sort_index(inplace=True) # noqa: PD002
+ delivered_demand.sort_index(inplace=True) # noqa: PD002
+ leak_res.sort_index(inplace=True) # noqa: PD002
# return delivered_demand, required_demand, leak_res
@@ -509,7 +509,7 @@ def percentOfEffectNodes(
)
# leak_res.loc[leak_res_non_available_time_list, : ] = temp_data
leak_res = leak_res.append(temp_data)
- leak_res.sort_index(inplace=True)
+ leak_res.sort_index(inplace=True) # noqa: PD002
leak_criteria_exceeded = (
leak_res.fillna(0) >= leak_ratio * required_demand[leak_res.columns]
)
@@ -543,10 +543,10 @@ def percentOfEffectNodes(
)
else:
# print(step_time_beg)
- window_bsc_not_met.drop(step_time_beg, inplace=True)
+ window_bsc_not_met.drop(step_time_beg, inplace=True) # noqa: PD002
# return window_bsc_not_met
pre_incident = (window_bsc_not_met.loc[: 3600 * 3]).any()
- non_incident = pre_incident[pre_incident == False].index
+ non_incident = pre_incident[pre_incident == False].index # noqa: E712
number_of_good_nodes = len(non_incident)
@@ -556,7 +556,7 @@ def percentOfEffectNodes(
# print(not_met_node_name_list[not_met_node_name_list==True])
not_met_node_name_list = not_met_node_name_list[
- not_met_node_name_list == True
+ not_met_node_name_list == True # noqa: E712
]
not_met_node_name_list = not_met_node_name_list.index
window_bsc_not_met = window_bsc_not_met[not_met_node_name_list]
@@ -566,11 +566,11 @@ def percentOfEffectNodes(
set(non_incident).intersection(set(not_met_node_name_list))
)
for step_time, row in window_bsc_not_met[non_incident].iterrows():
- if step_time <= 14400:
+ if step_time <= 14400: # noqa: PLR2004
continue
- if row.any() == False:
- print(step_time)
+ if row.any() == False: # noqa: E712
+ print(step_time) # noqa: T201
cut_time = step_time
break
@@ -594,7 +594,7 @@ def percentOfEffectNodes(
)
number_of_unreported_demand_nodes = len(never_reported_nodes)
if number_of_unreported_demand_nodes > 0:
- warnings.warn(
+ warnings.warn( # noqa: B028
'REWET WARNING: there are '
+ str(number_of_unreported_demand_nodes)
+ 'unreported nodes'
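Both outage-map methods above apply a consistency window: a node only counts as out of service when the condition holds across every sample in a rolling window, so single-step blips are ignored. A compact illustration of that rolling-sum trick on a boolean series:

```python
import pandas as pd

met = pd.Series([True, True, False, True, True, True])  # service met per step
window = 2
sustained = met.rolling(window).sum() >= window  # True only after 2 consecutive True steps
print(sustained.tolist())  # -> [False, True, False, False, True, True]
```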
diff --git a/modules/systemPerformance/REWET/REWET/Output/Raw_Data.py b/modules/systemPerformance/REWET/REWET/Output/Raw_Data.py
index e6b175948..d9a9c267b 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Raw_Data.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Raw_Data.py
@@ -1,14 +1,14 @@
"""Created on Mon Oct 24 18:27:03 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
-class Raw_Data:
+class Raw_Data: # noqa: D101
def __init__():
pass
- def saveDetailedDemandNodeData(
+ def saveDetailedDemandNodeData( # noqa: N802, D102
self,
scn_name,
data_type,
@@ -23,7 +23,7 @@ def saveDetailedDemandNodeData(
data = data[self.demand_node_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedJunctionData(self, scn_name, data_type, file_address, file_type):
+ def saveDetailedJunctionData(self, scn_name, data_type, file_address, file_type): # noqa: N802, D102
if data_type not in ['pressure', 'head', 'demand', 'quality']:
raise ValueError(
'data type is not recognized for junctions: ' + repr(data_type)
@@ -32,7 +32,7 @@ def saveDetailedJunctionData(self, scn_name, data_type, file_address, file_type)
data = data[self.wn.junction_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedTankData(self, scn_name, data_type, file_address, file_type):
+ def saveDetailedTankData(self, scn_name, data_type, file_address, file_type): # noqa: N802, D102
if data_type not in ['pressure', 'head', 'demand', 'quality']:
raise ValueError(
'data type is not recognized for tanks: ' + repr(data_type)
@@ -41,7 +41,7 @@ def saveDetailedTankData(self, scn_name, data_type, file_address, file_type):
data = data[self.wn.tank_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedReservoirData(
+ def saveDetailedReservoirData( # noqa: N802, D102
self,
scn_name,
data_type,
@@ -56,7 +56,7 @@ def saveDetailedReservoirData(
data = data[self.wn.tank_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedPipeData(self, scn_name, data_type, file_address, file_type):
+ def saveDetailedPipeData(self, scn_name, data_type, file_address, file_type): # noqa: N802, D102
if data_type not in [
'linkquality',
'flowrate',
@@ -74,7 +74,7 @@ def saveDetailedPipeData(self, scn_name, data_type, file_address, file_type):
data = data[self.wn.pipe_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedPumpData(self, scn_name, data_type, file_address, file_type):
+ def saveDetailedPumpData(self, scn_name, data_type, file_address, file_type): # noqa: N802, D102
if data_type not in [
'linkquality',
'flowrate',
@@ -92,7 +92,7 @@ def saveDetailedPumpData(self, scn_name, data_type, file_address, file_type):
data = data[self.wn.pump_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def saveDetailedValveData(self, scn_name, data_type, file_address, file_type):
+ def saveDetailedValveData(self, scn_name, data_type, file_address, file_type): # noqa: N802, D102
if data_type not in [
'linkquality',
'flowrate',
@@ -110,7 +110,7 @@ def saveDetailedValveData(self, scn_name, data_type, file_address, file_type):
data = data[self.wn.valve_name_list]
self.saveDataFrame(data, file_address, file_type=file_type)
- def getDetailedData(self, scn_name, data_type):
+ def getDetailedData(self, scn_name, data_type): # noqa: N802, D102
cur_scn_data = None
if data_type in [
'linkquality',
@@ -126,10 +126,10 @@ def getDetailedData(self, scn_name, data_type):
elif data_type in ['pressure', 'head', 'demand', 'quality']:
cur_scn_data = self.data[scn_name].node[data_type]
else:
- raise ValueError('Unknown Data Type For output')
+ raise ValueError('Unknown Data Type For output') # noqa: EM101, TRY003
return cur_scn_data
- def saveDataFrame(dataframe, file_address, file_type='xlsx'):
+ def saveDataFrame(dataframe, file_address, file_type='xlsx'): # noqa: N802, N805, D102
if file_type == 'xlsx':
dataframe.to_excel(file_address)
elif file_type == 'csv':
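`saveDataFrame` dispatches on a `file_type` string with an `if`/`elif` chain. A writer table keeps the mapping in one place and makes the unknown-type failure explicit; a sketch under illustrative names, not the REWET API:

```python
import pandas as pd

_WRITERS = {
    'xlsx': pd.DataFrame.to_excel,
    'csv': pd.DataFrame.to_csv,
}


def save_dataframe(dataframe, file_address, file_type='xlsx'):
    try:
        writer = _WRITERS[file_type]
    except KeyError:
        msg = 'Unknown file type for output: ' + repr(file_type)
        raise ValueError(msg) from None
    writer(dataframe, file_address)


save_dataframe(pd.DataFrame({'a': [1]}), 'out.csv', file_type='csv')
```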
diff --git a/modules/systemPerformance/REWET/REWET/Output/Result_Time.py b/modules/systemPerformance/REWET/REWET/Output/Result_Time.py
index daab0166e..f0267634f 100644
--- a/modules/systemPerformance/REWET/REWET/Output/Result_Time.py
+++ b/modules/systemPerformance/REWET/REWET/Output/Result_Time.py
@@ -1,25 +1,25 @@
"""Created on Thu Nov 10 18:00:55 2022
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import numpy as np
import pandas as pd
-class Result_Time:
+class Result_Time: # noqa: D101
def __init__():
pass
- def convertTimeSecondToDay(self, data, column, time_shift=0):
+ def convertTimeSecondToDay(self, data, column, time_shift=0): # noqa: N802, D102
data.loc[:, column] = data.loc[:, column] - time_shift
data.loc[:, column] = data.loc[:, column] / 24 / 3600
- def convertTimeSecondToHour(self, data, column, time_shift=0):
+ def convertTimeSecondToHour(self, data, column, time_shift=0): # noqa: N802, D102
data.loc[:, column] = data.loc[:, column] - time_shift
data.loc[:, column] = data.loc[:, column] / 3600
- def averageOverDaysCrewTotalReport(self, crew_report):
+ def averageOverDaysCrewTotalReport(self, crew_report): # noqa: N802, D102
time_max_seconds = crew_report.index.max()
time_max_days = int(np.ceil(time_max_seconds / 24 / 3600))
daily_crew_report = pd.DataFrame(
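`averageOverDaysCrewTotalReport` buckets a seconds-indexed crew report into whole days. Assuming plain day-by-day averaging, integer-dividing the index by 86400 and grouping reproduces the bucketing in vectorized form:

```python
import pandas as pd

report = pd.Series([1.0, 2.0, 4.0], index=[0, 43200, 90000])  # index in seconds
daily_mean = report.groupby(report.index // (24 * 3600)).mean()
print(daily_mean.to_dict())  # -> {0: 1.5, 1: 4.0}
```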
diff --git a/modules/systemPerformance/REWET/REWET/Project.py b/modules/systemPerformance/REWET/REWET/Project.py
index e3d68108b..a3faf81ad 100644
--- a/modules/systemPerformance/REWET/REWET/Project.py
+++ b/modules/systemPerformance/REWET/REWET/Project.py
@@ -1,10 +1,10 @@
"""Created on Mon Jan 9 09:03:57 2023
@author: snaeimi
-"""
+""" # noqa: N999, D400
-class Project:
+class Project: # noqa: D101
def __init__(self, project_settings, scenario_list):
self.scenario_list = scenario_list
self.project_settings = project_settings
diff --git a/modules/systemPerformance/REWET/REWET/Report_Reading.py b/modules/systemPerformance/REWET/REWET/Report_Reading.py
index c2d203e68..e490739a5 100644
--- a/modules/systemPerformance/REWET/REWET/Report_Reading.py
+++ b/modules/systemPerformance/REWET/REWET/Report_Reading.py
@@ -1,12 +1,12 @@
"""Created on Tue Oct 4 16:07:24 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import datetime
-def parseTimeStamp(time_stamp):
+def parseTimeStamp(time_stamp): # noqa: N802, D103
striped_time_stamp = time_stamp.split(':')
hour = striped_time_stamp[0]
minute = striped_time_stamp[1]
@@ -19,11 +19,11 @@ def parseTimeStamp(time_stamp):
return (hour, minute, minute)
-class Report_Reading:
+class Report_Reading: # noqa: D101
def __init__(self, file_addr):
self.file_data = {}
self.maximum_trial_time = []
- with open(file_addr, encoding='utf-8') as f:
+ with open(file_addr, encoding='utf-8') as f: # noqa: PTH123
lnum = 0
for line in f:
# self.file_data[lnum] = line
@@ -52,4 +52,4 @@ def __init__(self, file_addr):
).total_seconds()
time_sec = int(time_sec)
self.maximum_trial_time.append(time_sec)
- lnum += 1
+ lnum += 1 # noqa: SIM113
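
The SIM113 on `lnum += 1` is ruff suggesting that `enumerate()` supply the line counter instead of a hand-incremented variable. A self-contained sketch of the equivalent loop, with a StringIO standing in for the opened report file:

import io

report = io.StringIO('Trial 1\nTrial 2\n')  # stand-in for the report file
for lnum, line in enumerate(report):
    # `lnum` tracks the line number without any manual increment.
    print(lnum, line.rstrip())
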
diff --git a/modules/systemPerformance/REWET/REWET/Result_Project.py b/modules/systemPerformance/REWET/REWET/Result_Project.py
index b6021c270..7ac0b92cb 100644
--- a/modules/systemPerformance/REWET/REWET/Result_Project.py
+++ b/modules/systemPerformance/REWET/REWET/Result_Project.py
@@ -1,7 +1,7 @@
"""Created on Sun Oct 23 15:00:31 2022
@author: snaeimi
-"""
+""" # noqa: N999, D400
import copy
import os
@@ -20,23 +20,23 @@
from Output.Result_Time import Result_Time
-class Project_Result(Map, Raw_Data, Curve, Crew_Report, Result_Time):
+class Project_Result(Map, Raw_Data, Curve, Crew_Report, Result_Time): # noqa: D101
def __init__(
self,
project_file_addr,
result_directory=None,
- ignore_not_found=False,
+ ignore_not_found=False, # noqa: FBT002
to_neglect_file=None,
node_col='',
result_file_dir=None,
- iObject=False,
+ iObject=False, # noqa: FBT002, N803
):
- if iObject == False:
+ if iObject == False: # noqa: E712
self.readPorjectFile(project_file_addr)
else:
self.project = copy.deepcopy(project_file_addr)
- if result_file_dir != None:
+ if result_file_dir != None: # noqa: E711
self.project.project_settings.process.settings['result_directory'] = (
result_file_dir
)
@@ -82,8 +82,8 @@ def __init__(
to_neglect = []
        # sina, you deliberately introduced a bug here
- if to_neglect_file != None and False:
- raise
+ if to_neglect_file != None and False: # noqa: SIM223, E711
+ raise # noqa: PLE0704
file_data = pd.read_excel(to_neglect_file)
to_neglect = file_data[node_col].to_list()
@@ -98,32 +98,32 @@ def __init__(
self.node_name_list = self.wn.node_name_list.copy()
ret_val = self.checkForNotExistingFile(ignore_not_found)
self.prepareData()
- return ret_val
+ return ret_val # noqa: PLE0101
- def readPorjectFile(self, project_file_addr):
- print(project_file_addr)
- with open(project_file_addr, 'rb') as f:
- self.project = pickle.load(f)
+ def readPorjectFile(self, project_file_addr): # noqa: N802, D102
+ print(project_file_addr) # noqa: T201
+ with open(project_file_addr, 'rb') as f: # noqa: PTH123
+ self.project = pickle.load(f) # noqa: S301
- def loadPopulation(self, popuation_data, node_id_header, population_header):
+ def loadPopulation(self, popuation_data, node_id_header, population_header): # noqa: N802, D102
pop = popuation_data.copy()
pop = pop.set_index(node_id_header)
pop = pop[population_header]
self._population_data = pop
- def checkForNotExistingFile(self, ignore_not_found):
+ def checkForNotExistingFile(self, ignore_not_found): # noqa: N802, D102
self.scn_name_list_that_result_file_not_found = []
result_directory = self.result_directory
# print(self.project.scenario_list)
- for scn_name, row in self.project.scenario_list.iterrows():
+ for scn_name, row in self.project.scenario_list.iterrows(): # noqa: B007
scenario_registry_file_name = scn_name + '_registry.pkl'
# print(result_directory)
# print(scenario_registry_file_name)
- registry_file_data_addr = os.path.join(
+ registry_file_data_addr = os.path.join( # noqa: PTH118
result_directory, scenario_registry_file_name
)
- if not os.path.exists(registry_file_data_addr):
+ if not os.path.exists(registry_file_data_addr): # noqa: PTH110
self.scn_name_list_that_result_file_not_found.append(scn_name)
if len(self.scn_name_list_that_result_file_not_found) > 0:
@@ -140,11 +140,11 @@ def checkForNotExistingFile(self, ignore_not_found):
+ repr(result_directory)
)
- def prepareData(self):
+ def prepareData(self): # noqa: N802, D102
i = 0
# result_directory = self.project.project_settings.process['result_directory']
# self.project.scenario_list = self.project.scenario_list.iloc[0:20]
- for scn_name, row in self.project.scenario_list.iterrows():
+ for scn_name, row in self.project.scenario_list.iterrows(): # noqa: B007
self._RequiredDemandForAllNodesandtime[scn_name] = None
# settings_file_name = scn_name+'.xlsx'
# settings_file_addr = os.path.join(result_directory, settings_file_name)
@@ -156,7 +156,7 @@ def prepareData(self):
# self.time_size[scn_name] = len(self.data[scn_name].node['demand'].index)
self.index_to_scen_name[i] = scn_name
- i += 1
+ i += 1 # noqa: SIM113
self.scenario_prob[scn_name] = self.project.scenario_list.loc[
scn_name, 'Probability'
@@ -166,26 +166,26 @@ def prepareData(self):
ATTENTION: We need probability for any prbablistic result
"""
- def loadScneariodata(self, scn_name):
- if self.data[scn_name] != None:
+ def loadScneariodata(self, scn_name): # noqa: N802, D102
+ if self.data[scn_name] != None: # noqa: E711
return
- print('loading scenario ' + str(scn_name))
+ print('loading scenario ' + str(scn_name)) # noqa: T201
result_directory = self.result_directory
# scenario_registry_file_name = scn_name+"_registry.pkl"
# registry_file_data_addr = os.path.join(result_directory, scenario_registry_file_name)
scenario_registry_file_name = scn_name + '_registry.pkl'
- reg_addr = os.path.join(result_directory, scenario_registry_file_name)
+ reg_addr = os.path.join(result_directory, scenario_registry_file_name) # noqa: PTH118
try:
- with open(reg_addr, 'rb') as f:
+ with open(reg_addr, 'rb') as f: # noqa: PTH123
# print(output_addr)
- reg_file_data = pickle.load(f)
+ reg_file_data = pickle.load(f) # noqa: S301
self.registry[scn_name] = reg_file_data
res_file_data = self.registry[scn_name].result
- except:
+ except: # noqa: E722
scenario_registry_file_name = scn_name + '.res'
- res_addr = os.path.join(result_directory, scenario_registry_file_name)
- with open(res_addr, 'rb') as f:
- res_file_data = pickle.load(f)
+ res_addr = os.path.join(result_directory, scenario_registry_file_name) # noqa: PTH118
+ with open(res_addr, 'rb') as f: # noqa: PTH123
+ res_file_data = pickle.load(f) # noqa: S301
# scenario_registry_file_name = scn_name+".res"
# res_addr = os.path.join(result_directory, scenario_registry_file_name)
# with open(res_addr, 'rb') as f:
@@ -197,24 +197,24 @@ def loadScneariodata(self, scn_name):
self.remove_maximum_trials(res_file_data)
self.data[scn_name] = res_file_data
- def readData(self):
+ def readData(self): # noqa: N802, D102
# i=0
self.project.scenario_list = self.project.scenario_list.iloc[0:2]
result_directory = self.result_directory
- for scn_name, row in self.project.scenario_list.iterrows():
+ for scn_name, row in self.project.scenario_list.iterrows(): # noqa: B007
self._RequiredDemandForAllNodesandtime[scn_name] = None
scenario_registry_file_name = scn_name + '_registry.pkl'
- registry_file_data_addr = os.path.join(
+ registry_file_data_addr = os.path.join( # noqa: PTH118
result_directory, scenario_registry_file_name
)
- with open(registry_file_data_addr, 'rb') as f:
- if not os.path.exists(registry_file_data_addr):
+ with open(registry_file_data_addr, 'rb') as f: # noqa: PTH123
+ if not os.path.exists(registry_file_data_addr): # noqa: PTH110
raise ValueError(
'Registry File Not Found: ' + str(registry_file_data_addr)
)
- self.registry[scn_name] = pickle.load(f)
+ self.registry[scn_name] = pickle.load(f) # noqa: S301
# self.pipe_damages[scn_name] = current_scenario_registry.damage.pipe_all_damages
# self.node_damages[scn_name] = current_scenario_registry.node_damage
@@ -248,9 +248,9 @@ def readData(self):
"""
ATTENTION: We need probability for any prbablistic result
"""
- print(str(scn_name) + ' loaded')
+ print(str(scn_name) + ' loaded') # noqa: T201
- def remove_maximum_trials(self, data):
+ def remove_maximum_trials(self, data): # noqa: D102
all_time_list = data.maximum_trial_time
result_time_list = data.node['demand'].index.to_list()
result_time_max_trailed_list = [
@@ -267,7 +267,7 @@ def remove_maximum_trials(self, data):
if len(result_time_max_trailed_list) > 0:
# print(result_time_max_trailed_list)
att_data = data.node[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.node[att] = att_data
for att in data.link:
@@ -277,12 +277,12 @@ def remove_maximum_trials(self, data):
time for time in result_time_list if time in all_time_list
]
att_data = data.link[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.link[att] = att_data
flow_balance = data.node['demand'].sum(axis=1)
- time_to_drop = flow_balance[abs(flow_balance) >= 0.01].index
+ time_to_drop = flow_balance[abs(flow_balance) >= 0.01].index # noqa: PLR2004
# result_time_list = data.node['demand'].index.to_list()
# = [ time for time in result_time_list if time in all_time_list]
@@ -297,7 +297,7 @@ def remove_maximum_trials(self, data):
if len(result_time_max_trailed_list) > 0:
# print(result_time_max_trailed_list)
att_data = data.node[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.node[att] = att_data
for att in data.link:
@@ -309,13 +309,13 @@ def remove_maximum_trials(self, data):
result_time_max_trailed_list.sort()
if len(result_time_max_trailed_list) > 0:
att_data = data.link[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.link[att] = att_data
- def remove_maximum_trials_demand_flow(self, data):
+ def remove_maximum_trials_demand_flow(self, data): # noqa: D102
flow_balance = data.node['demand'].sum(axis=1)
- time_to_drop = flow_balance[abs(flow_balance) >= 0.01].index
+ time_to_drop = flow_balance[abs(flow_balance) >= 0.01].index # noqa: PLR2004
# result_time_list = data.node['demand'].index.to_list()
# = [ time for time in result_time_list if time in all_time_list]
@@ -326,9 +326,9 @@ def remove_maximum_trials_demand_flow(self, data):
result_time_max_trailed_list = [
time for time in result_time_list if time in time_to_drop
]
- print(result_time_max_trailed_list)
+ print(result_time_max_trailed_list) # noqa: T201
att_data = data.node[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.node[att] = att_data
for att in data.link:
@@ -338,14 +338,14 @@ def remove_maximum_trials_demand_flow(self, data):
time for time in result_time_list if time in time_to_drop
]
att_data = data.link[att]
- att_data.drop(result_time_max_trailed_list, inplace=True)
+ att_data.drop(result_time_max_trailed_list, inplace=True) # noqa: PD002
data.link[att] = att_data
- def readPopulation(
+ def readPopulation( # noqa: N802, D102
self,
population_xlsx_addr='demandNode-Northridge.xlsx',
demand_node_header='NodeID',
- population_header='#Customer',
+ population_header='#Customer', # noqa: ARG002
):
pop = pd.read_excel(population_xlsx_addr)
pop = pop.set_index(demand_node_header)
@@ -362,7 +362,7 @@ def readPopulation(
+ repr(demand_node_without_population)
)
- def getRequiredDemandForAllNodesandtime(self, scn_name):
+ def getRequiredDemandForAllNodesandtime(self, scn_name): # noqa: N802
"""**********
ATTENTION: We Assume that all scenarios have the same time indexing
**********
@@ -374,10 +374,10 @@ def getRequiredDemandForAllNodesandtime(self, scn_name):
req_node_demand : Pandas DataFrame
Demand for all nodes and in all time
- """
+ """ # noqa: D205, D400
self.loadScneariodata(scn_name)
demand_ratio = self.demand_ratio
- if type(self._RequiredDemandForAllNodesandtime[scn_name]) != type(None):
+ if type(self._RequiredDemandForAllNodesandtime[scn_name]) != type(None): # noqa: E721
return self._RequiredDemandForAllNodesandtime[scn_name]
undamaged_wn = self.wn
time_index = self.data[scn_name].node['demand'].index
@@ -399,9 +399,9 @@ def getRequiredDemandForAllNodesandtime(self, scn_name):
i += 1
node = undamaged_wn.get_node(node_name)
pattern_list = node.demand_timeseries_list.pattern_list()
- if pattern_list[0] != None:
+ if pattern_list[0] != None: # noqa: E711
node_pattern_list[node_name] = pattern_list[0].name
- elif pattern_list[0] == None and default_pattern != None:
+ elif pattern_list[0] == None and default_pattern != None: # noqa: E711
node_pattern_list[node_name] = str(default_pattern)
else:
node_pattern_list[node_name] = None
@@ -422,8 +422,8 @@ def getRequiredDemandForAllNodesandtime(self, scn_name):
for time in iter(time_index):
multiplier[pattern_name].loc[time] = cur_pattern.at(time)
- variable_base_demand = []
- variable_node_name_list = []
+ variable_base_demand = [] # noqa: F841
+ variable_node_name_list = [] # noqa: F841
for node_name, pattern_name in node_pattern_list.items():
cur_node_req_demand = (
multiplier[pattern_name]
@@ -471,55 +471,55 @@ def getRequiredDemandForAllNodesandtime(self, scn_name):
)
return self._RequiredDemandForAllNodesandtime[scn_name]
- def AS_getDLIndexPopulation(
+ def AS_getDLIndexPopulation( # noqa: N802, D102
self,
- iPopulation='No',
- ratio=False,
- consider_leak=False,
+ iPopulation='No', # noqa: N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=0.75,
):
scenario_list = list(self.data.keys())
- all_scenario_DL_data = {}
+ all_scenario_DL_data = {} # noqa: N806
for scn_name in scenario_list:
- cur_scn_DL = self.getDLIndexPopulation_4(
+ cur_scn_DL = self.getDLIndexPopulation_4( # noqa: N806
scn_name,
iPopulation=iPopulation,
ratio=ratio,
consider_leak=consider_leak,
leak_ratio=leak_ratio,
)
- cur_scn_DL = cur_scn_DL.to_dict()
+ cur_scn_DL = cur_scn_DL.to_dict() # noqa: N806
all_scenario_DL_data[scn_name] = cur_scn_DL
return pd.DataFrame.from_dict(all_scenario_DL_data)
- def AS_getQNIndexPopulation(
+ def AS_getQNIndexPopulation( # noqa: N802, D102
self,
- iPopulation='No',
- ratio=False,
- consider_leak=False,
+ iPopulation='No', # noqa: N803
+ ratio=False, # noqa: FBT002
+ consider_leak=False, # noqa: FBT002
leak_ratio=0.75,
):
scenario_list = list(self.data.keys())
- all_scenario_QN_data = {}
+ all_scenario_QN_data = {} # noqa: N806
for scn_name in scenario_list:
self.loadScneariodata(scn_name)
- cur_scn_QN = self.getQNIndexPopulation_4(
+ cur_scn_QN = self.getQNIndexPopulation_4( # noqa: N806
scn_name,
iPopulation=iPopulation,
ratio=ratio,
consider_leak=consider_leak,
leak_ratio=leak_ratio,
)
- cur_scn_QN = cur_scn_QN.to_dict()
+ cur_scn_QN = cur_scn_QN.to_dict() # noqa: N806
all_scenario_QN_data[scn_name] = cur_scn_QN
return pd.DataFrame.from_dict(all_scenario_QN_data)
- def AS_getOutage_4(
+ def AS_getOutage_4( # noqa: N802, D102
self,
- LOS='DL',
- iConsider_leak=False,
+ LOS='DL', # noqa: N803
+ iConsider_leak=False, # noqa: FBT002, N803
leak_ratio=0,
consistency_time_window=7200,
):
@@ -536,16 +536,16 @@ def AS_getOutage_4(
)
cur_scn_outage = cur_scn_outage['restoration_time'].to_dict()
all_scenario_outage_data[scn_name] = cur_scn_outage
- i += 1
+ i += 1 # noqa: SIM113
return pd.DataFrame.from_dict(all_scenario_outage_data)
- def PR_getBSCPercentageExcedanceCurce(self, data_frame, restoration_percentage):
+ def PR_getBSCPercentageExcedanceCurce(self, data_frame, restoration_percentage): # noqa: N802, D102
max_time = data_frame.max().max()
restore_time = {}
- if type(self._population_data) == type(None):
+ if type(self._population_data) == type(None): # noqa: E721
demand_node_name_list = data_frame.index
population = pd.Series(index=demand_node_name_list, data=1)
else:
@@ -597,17 +597,17 @@ def PR_getBSCPercentageExcedanceCurce(self, data_frame, restoration_percentage):
list(self.scenario_prob.keys()), 'restore_time'
]
restore_data['prob'] = list(self.scenario_prob.values())
- restore_data.sort_values('restore_time', ascending=False, inplace=True)
+ restore_data.sort_values('restore_time', ascending=False, inplace=True) # noqa: PD002
ep_mat = Helper.EPHelper(restore_data['prob'].to_numpy())
restore_data['EP'] = ep_mat
return restore_data
- def PR_getCurveExcedence(
+ def PR_getCurveExcedence( # noqa: C901, N802, D102
self,
data_frame,
result_type='mean',
- daily=False,
+ daily=False, # noqa: FBT002
min_time=0,
max_time=24 * 3600 * 1000,
):
@@ -623,12 +623,12 @@ def PR_getCurveExcedence(
cur_scn_data = cur_scn_data[cur_scn_data.index >= min_time]
cur_scn_data = cur_scn_data[cur_scn_data.index <= max_time]
- if daily == True:
+ if daily == True: # noqa: E712
cur_scn_data = self.getResultSeperatedDaily(cur_scn_data)
if result_type == 'mean':
cur_mean_res = cur_scn_data.mean()
- if type(cur_mean_res) != pd.core.series.Series:
+ if type(cur_mean_res) != pd.core.series.Series: # noqa: E721
temp_res = {'mean_dmg': cur_mean_res}
dmg_index_list.append('mean_dmg')
else:
@@ -639,7 +639,7 @@ def PR_getCurveExcedence(
dmg_index_list.append(temp_dmg_index)
elif result_type == 'min':
dmg_min_res = cur_scn_data.min()
- if type(dmg_min_res) != pd.core.series.Series:
+ if type(dmg_min_res) != pd.core.series.Series: # noqa: E721
temp_res = {'min_dmg': dmg_min_res}
dmg_index_list.append('min_dmg')
else:
@@ -650,7 +650,7 @@ def PR_getCurveExcedence(
dmg_index_list.append(temp_dmg_index)
elif result_type == 'max':
dmg_max_res = cur_scn_data.min()
- if type(dmg_max_res) != pd.core.series.Series:
+ if type(dmg_max_res) != pd.core.series.Series: # noqa: E721
temp_res = {'max_dmg': dmg_max_res}
dmg_index_list.append('max_dmg')
else:
@@ -668,14 +668,14 @@ def PR_getCurveExcedence(
table = pd.DataFrame.from_dict(table_temp).set_index('index')
res = pd.DataFrame(
- index=[i for i in range(len(table.index))],
+ index=[i for i in range(len(table.index))], # noqa: C416
dtype=np.float64,
)
for dmg_name in dmg_index_list:
select_columns = ['prob']
select_columns.extend([dmg_name])
loop_table = table[select_columns]
- loop_table.sort_values(dmg_name, inplace=True)
+ loop_table.sort_values(dmg_name, inplace=True) # noqa: PD002
ep_mat = Helper.EPHelper(loop_table['prob'].to_numpy())
res[dmg_name] = loop_table[dmg_name].to_numpy()
@@ -683,7 +683,7 @@ def PR_getCurveExcedence(
return res
- def getResultSeperatedDaily(self, data, begin_time=0):
+ def getResultSeperatedDaily(self, data, begin_time=0): # noqa: N802, D102
data = data[data.index >= begin_time]
data.index = (data.index - begin_time) / (24 * 3600)
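
Most suppressions in Result_Project.py mark comparison idioms (E711, E712, E721) rather than rewriting them; the forms ruff would accept instead are sketched below. PLE0101 on `return ret_val` flags a sharper problem: `__init__` must return None, so that statement raises TypeError at construction whenever `ret_val` is anything else.

value = None
if value is None:            # preferred over `value == None` (E711)
    pass

flag = True
if flag:                     # preferred over `flag == True` (E712)
    pass

if isinstance(value, str):   # preferred over `type(value) == str` (E721)
    pass
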
diff --git a/modules/systemPerformance/REWET/REWET/Sim/Simulation.py b/modules/systemPerformance/REWET/REWET/Sim/Simulation.py
index 2b189319e..af697af48 100644
--- a/modules/systemPerformance/REWET/REWET/Sim/Simulation.py
+++ b/modules/systemPerformance/REWET/REWET/Sim/Simulation.py
@@ -1,4 +1,4 @@
-import math
+import math # noqa: INP001, D100
import os
import numpy as np
@@ -7,7 +7,7 @@
from EnhancedWNTR.sim.results import SimulationResults
-class Hydraulic_Simulation:
+class Hydraulic_Simulation: # noqa: D101
def __init__(
self,
wn,
@@ -28,24 +28,24 @@ def __init__(
self.wn.options.hydraulic.demand_model = 'PDA'
temp_folder = settings['temp_directory']
- if type(temp_folder) != str:
- raise ValueError('temp folder type is not str')
+ if type(temp_folder) != str: # noqa: E721
+ raise ValueError('temp folder type is not str') # noqa: EM101, TRY003
- if settings['save_time_step'] == True:
+ if settings['save_time_step'] == True: # noqa: E712
if temp_folder == '':
self.temp_directory = (
str(worker_rank) + '_' + repr(current_stop_time)
)
else:
- self.temp_directory = os.path.join(
+ self.temp_directory = os.path.join( # noqa: PTH118
temp_folder, str(worker_rank) + '_' + repr(current_stop_time)
)
- elif settings['save_time_step'] == False:
+ elif settings['save_time_step'] == False: # noqa: E712
if temp_folder == '':
self.temp_directory = str(worker_rank)
else:
- self.temp_directory = os.path.join(temp_folder, str(worker_rank))
+ self.temp_directory = os.path.join(temp_folder, str(worker_rank)) # noqa: PTH118
else:
raise ValueError(
"Unknown value for settings 'save_time_step': " + repr()
@@ -53,18 +53,18 @@ def __init__(
self._prev_isolated_junctions = prev_isolated_junctions
self._prev_isolated_links = prev_isolated_links
- def removeNonDemandNegativeNodeByPythonMinorLoss(self, maximum_iteration):
+ def removeNonDemandNegativeNodeByPythonMinorLoss(self, maximum_iteration): # noqa: N802, D102
current_stop_time = self.current_stop_time
- minimum_pressure = self.minimum_pressure
- required_pressure = self.required_pressure
+ minimum_pressure = self.minimum_pressure # noqa: F841
+ required_pressure = self.required_pressure # noqa: F841
temp_file_dest = self.temp_directory
orginal_c_dict = {}
for itrr in range(maximum_iteration):
- print(itrr)
+ print(itrr) # noqa: T201
sim = EpanetSimulator(self.wn)
self.s = sim
self._prev_isolated_junctions, self._prev_isolated_links = (
- sim._get_isolated_junctions_and_links(
+ sim._get_isolated_junctions_and_links( # noqa: SLF001
self._prev_isolated_junctions, self._prev_isolated_links
)
)
@@ -100,31 +100,31 @@ def removeNonDemandNegativeNodeByPythonMinorLoss(self, maximum_iteration):
orginal_c_dict[pipe_name] = new_closed_pipes[pipe_name]
return orginal_c_dict
- def isolateReservoirs(self, isolated_nodes):
+ def isolateReservoirs(self, isolated_nodes): # noqa: N802, D102
for reservoir_name, reservoir in self.wn.reservoirs():
- if self.wn._node_reg.get_usage(reservoir_name) == None:
- reservoir._is_isolated = True
+ if self.wn._node_reg.get_usage(reservoir_name) == None: # noqa: SLF001, E711
+ reservoir._is_isolated = True # noqa: SLF001
isolated_nodes.add(reservoir_name)
return isolated_nodes
- def isolateTanks(self, isolated_nodes):
+ def isolateTanks(self, isolated_nodes): # noqa: N802, D102
for tank_name, tank in self.wn.tanks():
- if self.wn._node_reg.get_usage(tank_name) == None:
- tank._is_isolated = True
+ if self.wn._node_reg.get_usage(tank_name) == None: # noqa: SLF001, E711
+ tank._is_isolated = True # noqa: SLF001
isolated_nodes.add(tank_name)
return isolated_nodes
- def removeNonDemandNegativeNodeByPythonClose(self, maximum_iteration):
+ def removeNonDemandNegativeNodeByPythonClose(self, maximum_iteration): # noqa: N802, D102
current_stop_time = self.current_stop_time
- minimum_pressure = self.minimum_pressure
- required_pressure = self.required_pressure
+ minimum_pressure = self.minimum_pressure # noqa: F841
+ required_pressure = self.required_pressure # noqa: F841
temp_file_dest = self.temp_directory
self.closed_pipes = {}
for itrr in range(maximum_iteration):
- print(itrr)
+ print(itrr) # noqa: T201
sim = EpanetSimulator(self.wn)
self._prev_isolated_junctions, self._prev_isolated_links = (
- sim._get_isolated_junctions_and_links(
+ sim._get_isolated_junctions_and_links( # noqa: SLF001
self._prev_isolated_junctions, self._prev_isolated_links
)
)
@@ -156,25 +156,25 @@ def removeNonDemandNegativeNodeByPythonClose(self, maximum_iteration):
# self.closed_pipes = orginal_c_dict
# return orginal_c_dict
- def rollBackPipeMinorLoss(self, altered_pipes):
+ def rollBackPipeMinorLoss(self, altered_pipes): # noqa: N802, D102
for pipe_name in altered_pipes:
self.wn.get_link(pipe_name).minor_loss = altered_pipes[pipe_name]
- def rollBackPipeClose(self):
+ def rollBackPipeClose(self): # noqa: N802, D102
altered_pipes = self.closed_pipes
for pipe_name in altered_pipes:
pipe = self.wn.get_link(pipe_name)
pipe.initial_status = altered_pipes[pipe_name]
- def performSimulation(self, next_event_time, iModified):
+ def performSimulation(self, next_event_time, iModified): # noqa: N802, N803, D102
current_stop_time = self.current_stop_time
- minimum_pressure = self.minimum_pressure
- required_pressure = self.required_pressure
+ minimum_pressure = self.minimum_pressure # noqa: F841
+ required_pressure = self.required_pressure # noqa: F841
temp_file_dest = self.temp_directory
sim = EpanetSimulator(self.wn)
# self.s=sim
self._prev_isolated_junctions, self._prev_isolated_links = (
- sim._get_isolated_junctions_and_links(
+ sim._get_isolated_junctions_and_links( # noqa: SLF001
self._prev_isolated_junctions, self._prev_isolated_links
)
)
@@ -184,10 +184,10 @@ def performSimulation(self, next_event_time, iModified):
self._prev_isolated_junctions = self.isolateTanks(
self._prev_isolated_junctions
)
- print('***********')
- print(len(self._prev_isolated_junctions))
- print(len(self._prev_isolated_links))
- print('-----------')
+ print('***********') # noqa: T201
+ print(len(self._prev_isolated_junctions)) # noqa: T201
+ print(len(self._prev_isolated_links)) # noqa: T201
+ print('-----------') # noqa: T201
sim.manipulateTimeOrder(
current_stop_time, next_event_time
) # , change_time_step=True, min_correction_time_step=self._min_correction_time)
@@ -198,10 +198,10 @@ def performSimulation(self, next_event_time, iModified):
)
return rr, i_run_successful
- def estimateRun(self, next_event_time, iModified):
+ def estimateRun(self, next_event_time, iModified): # noqa: N802, N803, D102
current_stop_time = self.current_stop_time
- minimum_pressure = self.minimum_pressure
- required_pressure = self.required_pressure
+ minimum_pressure = self.minimum_pressure # noqa: F841
+ required_pressure = self.required_pressure # noqa: F841
sim = EpanetSimulator(self.wn)
duration = self.wn.options.time.duration
@@ -210,7 +210,7 @@ def estimateRun(self, next_event_time, iModified):
temp_file_dest = self.temp_directory
self._prev_isolated_junctions, self._prev_isolated_links = (
- sim._get_isolated_junctions_and_links(
+ sim._get_isolated_junctions_and_links( # noqa: SLF001
self._prev_isolated_junctions, self._prev_isolated_links
)
)
@@ -231,10 +231,10 @@ def estimateRun(self, next_event_time, iModified):
return rr, i_run_successful
- def estimateWithoutRun(self, result, next_event_time):
+ def estimateWithoutRun(self, result, next_event_time): # noqa: N802, D102
current_stop_time = self.current_stop_time
- minimum_pressure = self.minimum_pressure
- required_pressure = self.required_pressure
+ minimum_pressure = self.minimum_pressure # noqa: F841
+ required_pressure = self.required_pressure # noqa: F841
time = result.node['demand'].index.to_list()
unreliable_time_list = result.maximum_trial_time
@@ -247,7 +247,7 @@ def estimateWithoutRun(self, result, next_event_time):
break
if last_valid_time == -1:
- raise ValueError('Last reliabale time is not found')
+ raise ValueError('Last reliabale time is not found') # noqa: EM101, TRY003
time_step = min(
self.wn.options.time.hydraulic_timestep,
@@ -266,7 +266,7 @@ def estimateWithoutRun(self, result, next_event_time):
sim = EpanetSimulator(self.wn)
self._prev_isolated_junctions, self._prev_isolated_links = (
- sim._get_isolated_junctions_and_links(
+ sim._get_isolated_junctions_and_links( # noqa: SLF001
self._prev_isolated_junctions, self._prev_isolated_links
)
)
@@ -280,8 +280,8 @@ def estimateWithoutRun(self, result, next_event_time):
# available_node_list = [node_name for node_name in self.wn.node_name_list if self.wn.get_node(node_name)._is_isolated == False]
# available_link_list = [link_name for link_name in self.wn.link_name_list if self.wn.get_link(link_name)._is_isolated == False]
- available_node_list = [node_name for node_name in self.wn.node_name_list]
- available_link_list = [link_name for link_name in self.wn.link_name_list]
+ available_node_list = [node_name for node_name in self.wn.node_name_list] # noqa: C416
+ available_link_list = [link_name for link_name in self.wn.link_name_list] # noqa: C416
available_node_list = [
node_name
@@ -331,7 +331,7 @@ def estimateWithoutRun(self, result, next_event_time):
# print("---------------")
# print(result_node_head)
# print("---------------")
- if first_step == True:
+ if first_step == True: # noqa: E712
first_step = False
else:
self.updateTankHeadsAndPressure(
@@ -367,7 +367,7 @@ def estimateWithoutRun(self, result, next_event_time):
}
return rr, True
- def updateTankHeadsAndPressure(
+ def updateTankHeadsAndPressure( # noqa: N802
self,
demand,
head,
@@ -379,7 +379,7 @@ def updateTankHeadsAndPressure(
----------
wn: wntrfr.network.WaterNetworkModel
- """
+ """ # noqa: D205
dt = time_step
# print(sim_time)
demand_na = demand.loc[sim_time].isna()
@@ -388,7 +388,7 @@ def updateTankHeadsAndPressure(
for tank_name, tank in self.wn.tanks():
# checks if the node is isolated.
- if tank._is_isolated == True:
+ if tank._is_isolated == True: # noqa: SLF001, E712
continue
# checks of this node has been isolated at the last valid time. if
@@ -411,7 +411,7 @@ def updateTankHeadsAndPressure(
else:
q_net = 0.0
- dV = q_net * dt
+ dV = q_net * dt # noqa: N806
previous_head = head.loc[sim_time, tank_name]
if tank.vol_curve is None:
@@ -424,8 +424,8 @@ def updateTankHeadsAndPressure(
previous_level = previous_head - tank.elevation
- V0 = np.interp(previous_level, level_x, volume_y)
- V1 = V0 + dV
+ V0 = np.interp(previous_level, level_x, volume_y) # noqa: N806
+ V1 = V0 + dV # noqa: N806
new_level = np.interp(V1, volume_y, level_x)
delta_h = new_level - previous_level
@@ -443,7 +443,7 @@ def updateTankHeadsAndPressure(
head.loc[sim_time, tank_name] = new_head
pressure.loc[sim_time, tank_name] = new_head - tank.elevation
- def approximateNewResult(
+ def approximateNewResult( # noqa: N802, D102
self,
rr,
current_stop_time,
@@ -460,7 +460,7 @@ def approximateNewResult(
not_isolated_tanks = [
tank_name
for tank_name, tank in self.wn.tanks()
- if tank._is_isolated == False
+ if tank._is_isolated == False # noqa: SLF001, E712
]
# isolated_tanks = [tank_name for tank_name in self.tanks_name_list if tank_name in self._prev_isolated_junctions]
# isolated_nodes = [node_name for node_name in self.node_name_list if node_name in self._prev_isolated_junctions]
@@ -472,11 +472,11 @@ def approximateNewResult(
]
tank_min_level_list = [
self.wn.get_node(l).min_level
- for l in not_isolated_tanks
+ for l in not_isolated_tanks # noqa: E741
]
tank_max_level_list = [
self.wn.get_node(l).max_level
- for l in not_isolated_tanks
+ for l in not_isolated_tanks # noqa: E741
]
tanks_min_heads = [
@@ -494,9 +494,9 @@ def approximateNewResult(
tanks_min_heads = pd.Series(tanks_min_heads, not_isolated_tanks)
tanks_max_heads = pd.Series(tanks_max_heads, not_isolated_tanks)
- print(current_stop_time)
- print(time_step)
- print(end_time)
+ print(current_stop_time) # noqa: T201
+ print(time_step) # noqa: T201
+ print(end_time) # noqa: T201
for time_step_iter in range(
current_stop_time + time_step, end_time + 1, time_step
):
@@ -544,11 +544,11 @@ def approximateNewResult(
]
tank_min_level_list = [
self.wn.get_node(l).min_level
- for l in not_isolated_tanks
+ for l in not_isolated_tanks # noqa: E741
]
tank_max_level_list = [
self.wn.get_node(l).max_level
- for l in not_isolated_tanks
+ for l in not_isolated_tanks # noqa: E741
]
tanks_min_heads = [
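
`updateTankHeadsAndPressure` integrates the net tank flow over a step and inverts the level/volume curve to recover the new level. A standalone sketch of that math, assuming a monotone hypothetical curve in place of `tank.vol_curve`:

import numpy as np

level_x = np.array([0.0, 1.0, 2.0])      # tank levels (m)
volume_y = np.array([0.0, 50.0, 120.0])  # stored volumes (m^3)

def step_tank_level(previous_level, q_net, dt):
    v0 = np.interp(previous_level, level_x, volume_y)  # level -> volume
    v1 = v0 + q_net * dt                               # dV = q_net * dt
    return np.interp(v1, volume_y, level_x)            # volume -> new level

print(step_tank_level(1.0, q_net=0.05, dt=600.0))  # level after a 10-minute step
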
diff --git a/modules/systemPerformance/REWET/REWET/StochasticModel.py b/modules/systemPerformance/REWET/REWET/StochasticModel.py
index a5d65bef6..3291e6fbd 100644
--- a/modules/systemPerformance/REWET/REWET/StochasticModel.py
+++ b/modules/systemPerformance/REWET/REWET/StochasticModel.py
@@ -1,7 +1,7 @@
"""Created on Wed Apr 8 20:19:10 2020
@author: snaeimi
-"""
+""" # noqa: N999, D400
import logging
import os
@@ -23,7 +23,7 @@
logger = logging.getLogger(__name__)
-class StochasticModel:
+class StochasticModel: # noqa: D101
def __init__(
self,
water_network,
@@ -32,17 +32,17 @@ def __init__(
simulation_end_time,
restoration,
mode='PDD',
- i_restoration=True,
+ i_restoration=True, # noqa: FBT002
):
if (
- type(water_network) != wntrfr.network.model.WaterNetworkModel
- and type(water_network) != EnhancedWNTR.network.model.WaterNetworkModel
+ type(water_network) != wntrfr.network.model.WaterNetworkModel # noqa: E721
+ and type(water_network) != EnhancedWNTR.network.model.WaterNetworkModel # noqa: E721
):
- raise ValueError(
- 'Water_network model is not legitimate water Network Model'
+ raise ValueError( # noqa: TRY003
+ 'Water_network model is not legitimate water Network Model' # noqa: EM101
)
- if type(damage_model) != Damage.Damage:
- raise ValueError('damage_model is not a ligitimate Damage Model')
+ if type(damage_model) != Damage.Damage: # noqa: E721
+ raise ValueError('damage_model is not a ligitimate Damage Model') # noqa: EM101, TRY003
self.wn = water_network
self.damage_model = damage_model
self._simulation_time = simulation_end_time
@@ -52,7 +52,7 @@ def __init__(
self.timeline.checkAndAmendTime()
self.simulation_mode = None
- if mode == 'PDD' or mode == 'DD':
+ if mode == 'PDD' or mode == 'DD': # noqa: PLR1714
self.simulation_mode = mode
else:
self.simulation_mode = 'PDD'
@@ -68,7 +68,7 @@ def __init__(
self._prev_isolated_links = OrderedSet()
self.first_leak_flag = True
- def runLinearScenario(self, damage, settings, worker_rank=None):
+ def runLinearScenario(self, damage, settings, worker_rank=None): # noqa: C901, N802
"""Runs a simple linear analysis of water damage scenario
Parameters
@@ -80,17 +80,17 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
-------
Result.
- """
+ """ # noqa: D205, D400, D401
while self.timeline.iContinue():
sys.stdout.flush()
current_stop_time = self.timeline.getCurrentStopTime()
- print('--------------------------------------')
- print('At stop Time: ' + repr(current_stop_time / 3600))
+ print('--------------------------------------') # noqa: T201
+ print('At stop Time: ' + repr(current_stop_time / 3600)) # noqa: T201
# =============================================================================
# Restoration event Block
if (
self.timeline.iCurenttimeRestorationEvent()
- and self.iRestoration == True
+ and self.iRestoration == True # noqa: E712
):
logger.debug('\t Restoration Event ')
@@ -156,10 +156,10 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
duration = self.wn.options.time.duration
report_time_step = self.wn.options.time.report_timestep
try: # Run with modified EPANET V2.2
- print('Performing method 1')
+ print('Performing method 1') # noqa: T201
rr, i_run_successful = hyd_sim.performSimulation(
current_stop_time,
- True,
+ True, # noqa: FBT003
)
if current_stop_time in rr.maximum_trial_time:
pass
@@ -186,10 +186,10 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
hydraulic_impact
)
- except Exception as epa_err_1:
+ except Exception as epa_err_1: # noqa: TRY302
raise
if epa_err_1.args[0] == 'EPANET Error 110':
- print('Method 1 failed. Performing method 2')
+ print('Method 1 failed. Performing method 2') # noqa: T201
self.wn.options.time.duration = duration
self.wn.options.time.report_timestep = (
report_time_step
@@ -199,9 +199,9 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
] = -1
pipe.initial_status = initial_pipe_status
self._prev_isolated_junctions = (
- hyd_sim._prev_isolated_junctions
+ hyd_sim._prev_isolated_junctions # noqa: SLF001
)
- self._prev_isolated_links = hyd_sim._prev_isolated_links
+ self._prev_isolated_links = hyd_sim._prev_isolated_links # noqa: SLF001
self.wn.options.time.duration = duration
self.wn.options.time.report_timestep = report_time_step
damage.applyPipeDamages(self.wn, current_stop_time)
@@ -209,7 +209,7 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
damage.applyPumpDamages(self.wn, current_stop_time)
damage.applyTankDamages(self.wn, current_stop_time)
- if self.iRestoration == True:
+ if self.iRestoration == True: # noqa: E712
event_time_list = self.restoration.initialize(
self.wn, current_stop_time
) # starts restoration
@@ -217,23 +217,23 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
# =============================================================================
# This is for updatng the pipe damage log
- if settings['record_damage_table_logs'] == True:
- self.restoration._registry.updatePipeDamageTableTimeSeries(
+ if settings['record_damage_table_logs'] == True: # noqa: E712
+ self.restoration._registry.updatePipeDamageTableTimeSeries( # noqa: SLF001
current_stop_time
)
- self.restoration._registry.updateNodeDamageTableTimeSeries(
+ self.restoration._registry.updateNodeDamageTableTimeSeries( # noqa: SLF001
current_stop_time
)
# =============================================================================
# running the model
next_event_time = self.timeline.getNextTime()
- logger.debug('next event time is: ' + repr(next_event_time))
+ logger.debug('next event time is: ' + repr(next_event_time)) # noqa: G003
self.wn.implicitLeakToExplicitReservoir(self.registry)
- print('***** Running hydraulic *****')
+ print('***** Running hydraulic *****') # noqa: T201
- if type(worker_rank) != str:
+ if type(worker_rank) != str: # noqa: E721
worker_rank = str(worker_rank)
hyd_sim = Hydraulic_Simulation(
@@ -248,14 +248,14 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
duration = self.wn.options.time.duration
report_time_step = self.wn.options.time.report_timestep
try: # Run with modified EPANET V2.2
- print('Performing method 1')
+ print('Performing method 1') # noqa: T201
rr, i_run_successful = hyd_sim.performSimulation(
next_event_time,
- True,
+ True, # noqa: FBT003
)
except Exception as epa_err_1:
if epa_err_1.args[0] == 'EPANET Error 110':
- print('Method 1 failed. Performing method 2')
+ print('Method 1 failed. Performing method 2') # noqa: T201
try: # Remove Non-Demand Node by Python-Side iterative algorithm with closing
# self.wn.options.time.duration = duration
# self.wn.options.time.report_timestep = report_time_step
@@ -274,11 +274,11 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
# hyd_sim.rollBackPipeClose()
rr, i_run_successful = hyd_sim.estimateRun(
next_event_time,
- True,
+ True, # noqa: FBT003
)
except Exception as epa_err_3:
if epa_err_3.args[0] == 'EPANET Error 110':
- print('Method 3 failed. Performing method 4')
+ print('Method 3 failed. Performing method 4') # noqa: T201
try: # Extend result from the result at the beginning of the time step with modified EPANET V2.2
self.wn.options.time.duration = duration
self.wn.options.time.report_timestep = (
@@ -287,7 +287,7 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
rr, i_run_successful = (
hyd_sim.performSimulation(
next_event_time,
- False,
+ False, # noqa: FBT003
)
)
except Exception as epa_err_4:
@@ -297,14 +297,14 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
duration
)
self.wn.options.time.report_timestep = report_time_step
- print(
+ print( # noqa: T201
'Method 4 failed. Performing method 5'
)
# Extend result from the result at the beginning of the time step with modified EPANET V2.2
rr, i_run_successful = (
hyd_sim.estimateRun(
next_event_time,
- False,
+ False, # noqa: FBT003
)
)
except Exception as epa_err_5:
@@ -313,7 +313,7 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
== 'EPANET Error 110'
):
try:
- print(
+ print( # noqa: T201
'Method 5 failed. Performing method 6'
)
self.wn.options.time.duration = duration
@@ -325,26 +325,26 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
)
)
except Exception as epa_err_6:
- print(
+ print( # noqa: T201
'ERROR in rank='
+ repr(worker_rank)
+ ' and time='
+ repr(current_stop_time)
)
- raise epa_err_6
+ raise epa_err_6 # noqa: TRY201
else:
- raise epa_err_5
+ raise epa_err_5 # noqa: TRY201
else:
- raise epa_err_4
+ raise epa_err_4 # noqa: TRY201
else:
- raise epa_err_3
+ raise epa_err_3 # noqa: TRY201
else:
- raise epa_err_2
+ raise epa_err_2 # noqa: TRY201
else:
- raise epa_err_1
- self._prev_isolated_junctions = hyd_sim._prev_isolated_junctions
- self._prev_isolated_links = hyd_sim._prev_isolated_links
- print(
+ raise epa_err_1 # noqa: TRY201
+ self._prev_isolated_junctions = hyd_sim._prev_isolated_junctions # noqa: SLF001
+ self._prev_isolated_links = hyd_sim._prev_isolated_links # noqa: SLF001
+ print( # noqa: T201
'***** Finish Running at time '
+ repr(current_stop_time)
+ ' '
@@ -352,9 +352,9 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
+ ' *****'
)
- if i_run_successful == False:
+ if i_run_successful == False: # noqa: E712
continue
- self.wn.updateWaterNetworkModelWithResult(rr, self.restoration._registry)
+ self.wn.updateWaterNetworkModelWithResult(rr, self.restoration._registry) # noqa: SLF001
self.KeepLinearResult(
rr,
@@ -369,21 +369,21 @@ def runLinearScenario(self, damage, settings, worker_rank=None):
# =============================================================================
# self.resoration._registry.updateTankTimeSeries(self.wn, current_stop_time)
- self.restoration._registry.updateRestorationIncomeWaterTimeSeries(
+ self.restoration._registry.updateRestorationIncomeWaterTimeSeries( # noqa: SLF001
self.wn, current_stop_time
)
return self._linear_result
- def KeepLinearResult(
+ def KeepLinearResult( # noqa: C901, N802, D102
self,
result,
isolated_nodes,
node_attributes=None,
link_attributes=None,
- iCheck=False,
+ iCheck=False, # noqa: FBT002, ARG002, N803
): # , iNeedTimeCorrection=False, start_time=None):
- if self.registry.if_first_event_occured == False:
+ if self.registry.if_first_event_occured == False: # noqa: E712
self.registry.pre_event_demand_met = (
self.registry.pre_event_demand_met.append(result.node['demand'])
)
@@ -394,11 +394,11 @@ def KeepLinearResult(
# link_attributes = ['linkquality', 'flowrate', 'headloss', 'velocity', 'status', 'setting', 'frictionfact', 'rxnrate']
just_initialized_flag = False
- if self._linear_result == None:
+ if self._linear_result == None: # noqa: E711
just_initialized_flag = True
self._linear_result = result
- self.restoration._registry.result = self._linear_result
+ self.restoration._registry.result = self._linear_result # noqa: SLF001
node_result_type_elimination_list = set(result.node.keys()) - set(
node_attributes
)
@@ -414,7 +414,7 @@ def KeepLinearResult(
self._linear_result.node['leak'] = pd.DataFrame(dtype=float)
- active_pipe_damages = self.restoration._registry.active_pipe_damages
+ active_pipe_damages = self.restoration._registry.active_pipe_damages # noqa: SLF001
temp_active = active_pipe_damages.copy()
for virtual_demand_node in active_pipe_damages:
@@ -455,9 +455,9 @@ def KeepLinearResult(
result.node['demand'][real_demand_nodes] = result.node['demand'][
virtual_demand_nodes
]
- result.node['demand'].drop(virtual_demand_nodes, axis=1, inplace=True)
+ result.node['demand'].drop(virtual_demand_nodes, axis=1, inplace=True) # noqa: PD002
- active_nodal_damages = self.restoration._registry.active_nodal_damages
+ active_nodal_damages = self.restoration._registry.active_nodal_damages # noqa: SLF001
temp_active = active_nodal_damages.copy()
for virtual_demand_node in active_nodal_damages:
@@ -500,14 +500,14 @@ def KeepLinearResult(
non_isolated_pairs, axis=1
)
- if just_initialized_flag == False:
+ if just_initialized_flag == False: # noqa: E712
self._linear_result.maximum_trial_time.extend(result.maximum_trial_time)
saved_max_time = self._linear_result.node[
- list(self._linear_result.node.keys())[0]
+ list(self._linear_result.node.keys())[0] # noqa: RUF015
].index.max()
to_be_saved_min_time = result.node[
- list(result.node.keys())[0]
+ list(result.node.keys())[0] # noqa: RUF015
].index.min()
if (
abs(to_be_saved_min_time - saved_max_time) != 0
@@ -539,7 +539,7 @@ def KeepLinearResult(
].iloc[0]
if att in result.node:
- result.node[att].drop(result.node[att].index[0], inplace=True)
+ result.node[att].drop(result.node[att].index[0], inplace=True) # noqa: PD002
self._linear_result.node[att] = self._linear_result.node[
att
].append(result.node[att])
@@ -553,12 +553,12 @@ def KeepLinearResult(
].sort_index()
for att in link_attributes:
- result.link[att].drop(result.link[att].index[0], inplace=True)
+ result.link[att].drop(result.link[att].index[0], inplace=True) # noqa: PD002
self._linear_result.link[att] = self._linear_result.link[att].append(
result.link[att]
)
- def dumpPartOfResult(self):
+ def dumpPartOfResult(self): # noqa: C901, N802, D102
limit_size = self.registry.settings['limit_result_file_size']
limit_size_byte = limit_size * 1024 * 1024
@@ -572,7 +572,7 @@ def dumpPartOfResult(self):
att_size = sys.getsizeof(self._linear_result.link[att])
total_size += att_size
- print('total size= ' + repr(total_size / 1024 / 1024))
+ print('total size= ' + repr(total_size / 1024 / 1024)) # noqa: T201
if total_size >= limit_size_byte:
dump_result = SimulationResults()
@@ -582,7 +582,7 @@ def dumpPartOfResult(self):
# just to make sure. it obly add tens of micro seconds for each
# att
- self._linear_result.node[att].sort_index(inplace=True)
+ self._linear_result.node[att].sort_index(inplace=True) # noqa: PD002
att_result = self._linear_result.node[att]
if att_result.empty:
continue
@@ -599,7 +599,7 @@ def dumpPartOfResult(self):
if len(last_valid_time) > 0:
last_valid_time = last_valid_time[-2]
else:
- print(att_time_index)
+ print(att_time_index) # noqa: T201
last_valid_time = att_time_index[-2]
dump_result.node[att] = att_result.loc[:last_valid_time]
@@ -608,13 +608,13 @@ def dumpPartOfResult(self):
)
self._linear_result.node[att].drop(
att_result.index[: last_valid_time_index + 1],
- inplace=True,
+ inplace=True, # noqa: PD002
)
for att in self._linear_result.link:
# just to make sure. it obly add tens of micro seconds for each
# att
- self._linear_result.link[att].sort_index(inplace=True)
+ self._linear_result.link[att].sort_index(inplace=True) # noqa: PD002
att_result = self._linear_result.link[att]
if att_result.empty:
continue
@@ -639,7 +639,7 @@ def dumpPartOfResult(self):
)
self._linear_result.link[att].drop(
att_result.index[: last_valid_time_index + 1],
- inplace=True,
+ inplace=True, # noqa: PD002
)
dump_file_index = len(self.registry.result_dump_file_list) + 1
@@ -652,21 +652,21 @@ def dumpPartOfResult(self):
result_dump_file_name = (
self.registry.scenario_name + '.part' + str(dump_file_index)
)
- result_dump_file_dst = os.path.join(
+ result_dump_file_dst = os.path.join( # noqa: PTH118
self.registry.settings.process['result_directory'],
result_dump_file_name,
)
- with open(result_dump_file_dst, 'wb') as resul_file:
+ with open(result_dump_file_dst, 'wb') as resul_file: # noqa: PTH123
pickle.dump(dump_result, resul_file)
dump_list_file_name = self.registry.scenario_name + '.dumplist'
- list_file_dst = os.path.join(
+ list_file_dst = os.path.join( # noqa: PTH118
self.registry.settings.process['result_directory'],
dump_list_file_name,
)
- with open(list_file_dst, list_file_opening_mode) as part_list_file:
+ with open(list_file_dst, list_file_opening_mode) as part_list_file: # noqa: PTH123
part_list_file.writelines([result_dump_file_name])
self.registry.result_dump_file_list.append(result_dump_file_name)
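
The nested try/except chain in `runLinearScenario` (methods 1 through 6) is a fallback cascade keyed on the EPANET convergence error; TRY302 on method 1's handler also marks a genuinely dead branch, since the bare `raise` fires before the Error-110 check under it can run. A flattened sketch of the intended control flow, with hypothetical strategy callables:

def run_with_fallbacks(strategies):
    # Try each solver strategy in order; only the known EPANET convergence
    # failure falls through to the next method, anything else propagates.
    last_err = RuntimeError('no strategy supplied')
    for strategy in strategies:
        try:
            return strategy()
        except Exception as err:
            if err.args and err.args[0] == 'EPANET Error 110':
                last_err = err
                continue
            raise
    raise last_err

print(run_with_fallbacks([lambda: 'method 1 ok']))
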
diff --git a/modules/systemPerformance/REWET/REWET/__init__.py b/modules/systemPerformance/REWET/REWET/__init__.py
index 5e0f08fa7..5ecedd637 100644
--- a/modules/systemPerformance/REWET/REWET/__init__.py
+++ b/modules/systemPerformance/REWET/REWET/__init__.py
@@ -1,3 +1,3 @@
-from REWET import Input
+from REWET import Input # noqa: N999, D104
__version__ = '0.1.1'
diff --git a/modules/systemPerformance/REWET/REWET/initial.py b/modules/systemPerformance/REWET/REWET/initial.py
index b9b9c5ecc..637d4afa1 100644
--- a/modules/systemPerformance/REWET/REWET/initial.py
+++ b/modules/systemPerformance/REWET/REWET/initial.py
@@ -1,7 +1,7 @@
"""Created on Tue Jun 1 21:04:18 2021
@author: snaeimi
-"""
+""" # noqa: D400
import logging
import os
@@ -23,16 +23,16 @@
logging.basicConfig(level=50)
-class Starter:
- def createProjectFile(self, project_settings, damage_list, project_file_name):
+class Starter: # noqa: D101
+ def createProjectFile(self, project_settings, damage_list, project_file_name): # noqa: N802, D102
project = Project(project_settings, damage_list)
- project_file_addr = os.path.join(
+ project_file_addr = os.path.join( # noqa: PTH118
project_settings.process['result_directory'], project_file_name
)
- with open(project_file_addr, 'wb') as f:
+ with open(project_file_addr, 'wb') as f: # noqa: PTH123
pickle.dump(project, f)
- def run(self, project_file=None):
+ def run(self, project_file=None): # noqa: C901
"""Runs the ptogram. It initiates the Settings class and based on the
settings, run the program in either single scenario, multiple serial or
multiple parallel mode.
@@ -46,12 +46,12 @@ def run(self, project_file=None):
-------
None.
- """
+ """ # noqa: D205, D401
settings = Settings()
if project_file is not None:
project_file = str(project_file)
- if type(project_file) == str:
+ if type(project_file) == str: # noqa: E721
if project_file.split('.')[-1].lower() == 'prj':
settings.importProject(project_file)
elif project_file.split('.')[-1].lower() == 'json':
@@ -59,7 +59,7 @@ def run(self, project_file=None):
project_file = None
else:
raise ValueError(
- 'The input file has an unrgnizable extension: {}'.format(
+ 'The input file has an unrgnizable extension: {}'.format( # noqa: EM103
project_file.split('.')[-1].lower()
)
)
@@ -81,7 +81,7 @@ def run(self, project_file=None):
if settings.process['number_of_damages'] == 'multiple':
damage_list_size = len(damage_list)
for i in range(damage_list_size):
- print(i, flush=True)
+ print(i, flush=True) # noqa: T201
settings.initializeScenarioSettings(
i
) # initialize scenario-specific settings for each list/useful for sensitivity analysis
@@ -114,16 +114,16 @@ def run(self, project_file=None):
tank_damage_file_name=tank_damage_name,
)
t2 = time.time()
- print('Time of Single run is: ' + repr((t2 - t1) / 3600) + '(hr)')
+ print('Time of Single run is: ' + repr((t2 - t1) / 3600) + '(hr)') # noqa: T201
else:
- raise ValueError("Unknown value for settings['number_of_damages']")
+ raise ValueError("Unknown value for settings['number_of_damages']") # noqa: EM101, TRY003
elif settings.process['number_of_proccessor'] > 1:
self.run_mpi(settings)
else:
- raise ValueError('Number of processor must be equal to or more than 1')
+ raise ValueError('Number of processor must be equal to or more than 1') # noqa: EM101, TRY003
- def run_local_single(
+ def run_local_single( # noqa: C901
self,
file_name,
scenario_name,
@@ -161,8 +161,8 @@ def run_local_single(
-------
None.
- """
- print(
+ """ # noqa: D401
+ print( # noqa: T201
scenario_name
+ ' - '
+ file_name
@@ -172,27 +172,27 @@ def run_local_single(
+ str(pump_damage_file_name),
flush=True,
)
- if settings.process['number_of_proccessor'] > 1 and worker_rank == None:
- raise ValueError(
- 'for multiple processor analysis, worker_rank_must be provided'
+ if settings.process['number_of_proccessor'] > 1 and worker_rank == None: # noqa: E711
+ raise ValueError( # noqa: TRY003
+ 'for multiple processor analysis, worker_rank_must be provided' # noqa: EM101
)
- if type(file_name) != str:
+ if type(file_name) != str: # noqa: E721
file_name = str(
file_name
) # for number-only names to convert from int/float to str
- if type(tank_damage_file_name) != str:
+ if type(tank_damage_file_name) != str: # noqa: E721
tank_damage_file_name = str(
tank_damage_file_name
) # for number-only names to convert from int/float to str
- if type(nodal_damage_file_name) != str:
+ if type(nodal_damage_file_name) != str: # noqa: E721
nodal_damage_file_name = str(
nodal_damage_file_name
) # for number-only names to convert from int/float to str
- if type(pump_damage_file_name) != str:
+ if type(pump_damage_file_name) != str: # noqa: E721
pump_damage_file_name = str(
pump_damage_file_name
) # for number-only names to convert from int/float to str
@@ -226,15 +226,15 @@ def run_local_single(
settings.process['pipe_damage_file_directory'], pump_damage_file_name
)
else:
- raise ValueError(
- "Unknown value for settings['Pipe_damage_input_method']"
+ raise ValueError( # noqa: TRY003
+ "Unknown value for settings['Pipe_damage_input_method']" # noqa: EM101
)
if (
- pipe_damages.empty == True
- and node_damages.empty == True
- and tank_damages.empty == True
- and pump_damages.empty == True
+ pipe_damages.empty == True # noqa: E712
+ and node_damages.empty == True # noqa: E712
+ and tank_damages.empty == True # noqa: E712
+ and pump_damages.empty == True # noqa: E712
and settings.process['ignore_empty_damage']
):
return 2 # means it didn't run due to lack of any damage in pipe lines
@@ -266,9 +266,9 @@ def run_local_single(
self.registry.damage = self.damage
self.damage.pipe_all_damages = pipe_damages
self.damage.node_damage = node_damages
- if tank_damages.empty == False:
+ if tank_damages.empty == False: # noqa: E712
self.damage.tank_damage = tank_damages['Tank_ID']
- if pump_damages.empty == False:
+ if pump_damages.empty == False: # noqa: E712
self.damage.damaged_pumps = pump_damages['Pump_ID']
restoration = Restoration(
@@ -293,7 +293,7 @@ def run_local_single(
io.save_single(settings, result, scenario_name, registry)
return 1
- def run_mpi(self, settings):
+ def run_mpi(self, settings): # noqa: C901, D102
import mpi4py
from mpi4py import MPI
@@ -305,7 +305,7 @@ def run_mpi(self, settings):
settings.process['pipe_damage_file_directory'],
)
- if settings.process['mpi_resume'] == True:
+ if settings.process['mpi_resume'] == True: # noqa: E712
pipe_damage_list = pipe_damage_list.set_index('Scenario Name')
# _done_file = pd.read_csv('done.csv')
# _done_file = _done_file.transpose().reset_index().transpose().set_index(0)
@@ -358,9 +358,9 @@ def run_mpi(self, settings):
],
)
- iContinue = True
+ iContinue = True # noqa: N806
while iContinue:
- if (time.time() - time_jobs_saved) > 120:
+ if (time.time() - time_jobs_saved) > 120: # noqa: PLR2004
jobs.to_excel(
'temp-jobs.xlsx'
) # only for more information about the latest job status for the user in the real time
@@ -371,17 +371,17 @@ def run_mpi(self, settings):
recieved_msg = comm.recv(status=status)
worker_rank = status.Get_source()
if (
- recieved_msg == 1 or recieved_msg == 2 or recieved_msg == 3
+ recieved_msg == 1 or recieved_msg == 2 or recieved_msg == 3 # noqa: PLR1714, PLR2004
): # check if the job is done
msg_interpretation = None
if recieved_msg == 1:
msg_interpretation = 'done'
- elif recieved_msg == 2:
+ elif recieved_msg == 2: # noqa: PLR2004
msg_interpretation = 'done w/o simulation'
- elif recieved_msg == 3:
+ elif recieved_msg == 3: # noqa: PLR2004
msg_interpretation = 'exception happened'
- print(
+ print( # noqa: T201
'messaged received= '
+ repr(msg_interpretation)
+ ' rank recivied= '
@@ -399,9 +399,9 @@ def run_mpi(self, settings):
jobs_index = workers.loc[worker_rank]
if recieved_msg == 1:
jobs.loc[jobs_index, 'Done'] = 'True'
- elif recieved_msg == 2:
+ elif recieved_msg == 2: # noqa: PLR2004
jobs.loc[jobs_index, 'Done'] = 'No need'
- elif recieved_msg == 3:
+ elif recieved_msg == 3: # noqa: PLR2004
jobs.loc[jobs_index, 'Done'] = 'exception'
jobs.loc[jobs_index, 'time_confirmed'] = time.time()
@@ -419,7 +419,7 @@ def run_mpi(self, settings):
jobs.loc[jobs_index, 'time_confirmed']
- jobs.loc[jobs_index, 'time_assigned']
)
- with open(
+ with open( # noqa: PTH123
'done.csv', 'a', encoding='utf-8', buffering=1000000
) as f: # shows the order of done jobs
f.write(
@@ -445,11 +445,11 @@ def run_mpi(self, settings):
if (
len(not_assigned_data) > 0
and len(free_workers) > 0
- and time_constraint == False
+ and time_constraint == False # noqa: E712
):
jobs_index = not_assigned_data.index[0]
worker_rank = free_workers.index[0]
- print(
+ print( # noqa: T201
'trying to send '
+ repr(jobs_index)
+ ' to '
@@ -466,7 +466,7 @@ def run_mpi(self, settings):
'%Y-%m-%d %H:%M:%S',
time.localtime(jobs.loc[jobs_index, 'time_assigned']),
)
- with open(
+ with open( # noqa: PTH123
'runing.csv', 'a', encoding='utf-8', buffering=1000000
) as f:
f.write(
@@ -481,11 +481,11 @@ def run_mpi(self, settings):
)
binary_vector = jobs['Done'] == 'False'
- iContinue = binary_vector.any() and (not time_constraint)
+ iContinue = binary_vector.any() and (not time_constraint) # noqa: N806
# Finish workers with sending them a dummy data with tag=100 (death tag)
for i in range(1, settings.process['number_of_proccessor']):
- print('Death msg (tag=100) is sent to all workers. RIP!', flush=True)
+ print('Death msg (tag=100) is sent to all workers. RIP!', flush=True) # noqa: T201
comm.send('None', dest=i, tag=100)
jobs['time_lapsed'] = jobs['time_confirmed'] - jobs['time_assigned']
jobs['time_assigned'] = jobs.apply(
@@ -501,25 +501,25 @@ def run_mpi(self, settings):
axis=1,
)
jobs.to_excel('jobs.xlsx')
- print('MAIN NODE FINISHED. Going under!', flush=True)
+ print('MAIN NODE FINISHED. Going under!', flush=True) # noqa: T201
else:
worker_exit_flag = None
while True:
if comm.iprobe(source=0):
status = MPI.Status()
- print(
+ print( # noqa: T201
'trying to receive msg. -> rank= ' + repr(comm.rank),
flush=True,
)
scenario_index = comm.recv(source=0, status=status)
- if status.Get_tag() != 100:
+ if status.Get_tag() != 100: # noqa: PLR2004
scenario_name = pipe_damage_list.loc[
scenario_index, 'Scenario Name'
]
settings.initializeScenarioSettings(scenario_index)
- print(
+ print( # noqa: T201
'Rank= '
+ repr(comm.rank)
+ ' is assigned to '
@@ -546,23 +546,23 @@ def run_mpi(self, settings):
pump_damage_file_name=pump_damage,
tank_damage_file_name=tank_damage_name,
)
- print(
+ print( # noqa: T201
'run_flag for worker: '
+ repr(comm.rank)
+ ' --> '
+ repr(run_flag)
)
comm.isend(run_flag, dest=0)
- except Exception:
+ except Exception: # noqa: BLE001
error_dump_file = None
- if type(scenario_name) == str:
+ if type(scenario_name) == str: # noqa: E721
error_dump_file = 'dump_' + scenario_name + '.pkl'
else:
error_dump_file = (
'dump_' + repr(scenario_name) + '.pkl'
)
- with open(error_dump_file, 'wb') as f:
+ with open(error_dump_file, 'wb') as f: # noqa: PTH123
pickle.dump(self, f)
comm.isend(3, dest=0)
@@ -577,17 +577,17 @@ def run_mpi(self, settings):
]:
worker_exit_flag = 'Maximum time reached.'
break
- print(
+ print( # noqa: T201
repr(worker_exit_flag) + " I'm OUT -> Rank= " + repr(comm.rank),
flush=True,
)
- def checkArgument(self, argv):
- if len(argv) > 2:
- print('REWET usage: ./REWET [Project.prj (optional)]')
- if len(argv) == 1:
+ def checkArgument(self, argv): # noqa: N802, D102
+ if len(argv) > 2: # noqa: PLR2004
+ print('REWET usage: ./REWET [Project.prj (optional)]') # noqa: T201
+ if len(argv) == 1: # noqa: SIM103
return False
- else:
+ else: # noqa: RET505
return True
@@ -597,9 +597,9 @@ def checkArgument(self, argv):
start = Starter()
if_project = start.checkArgument(sys.argv)
if if_project:
- if os.path.exists(sys.argv[1]):
+ if os.path.exists(sys.argv[1]): # noqa: PTH110
tt = start.run(sys.argv[1])
else:
- print('Project file path is not valid: ' + repr(sys.argv[1]))
+ print('Project file path is not valid: ' + repr(sys.argv[1])) # noqa: T201
else:
tt = start.run()
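The hunks above annotate REWET's MPI master/worker loop: rank 0 assigns scenarios, workers answer with a small integer status (1 = done, 2 = done without simulation, 3 = exception), and a send with tag 100 (the "death tag") tells each worker to exit. A minimal sketch of that tag protocol with mpi4py; the job payloads and constant names here are illustrative, not REWET's own:

from mpi4py import MPI

DONE, DONE_NO_SIM, FAILED = 1, 2, 3  # worker status codes mirroring recieved_msg (2 and 3 unused in this sketch)
DEATH_TAG = 100                      # tag that tells a worker to shut down

comm = MPI.COMM_WORLD
if comm.rank == 0:
    for worker in range(1, comm.size):
        comm.send({'job': worker}, dest=worker)      # assign a dummy job
    for worker in range(1, comm.size):
        status_code = comm.recv(source=worker)       # 1, 2, or 3 from the worker
    for worker in range(1, comm.size):
        comm.send(None, dest=worker, tag=DEATH_TAG)  # release the workers
else:
    while True:
        status = MPI.Status()
        job = comm.recv(source=0, status=status)
        if status.Get_tag() == DEATH_TAG:            # death tag: stop polling
            break
        comm.send(DONE, dest=0)                      # pretend the job succeeded

Run with, e.g., mpirun -n 4 python sketch.py; rank 0 blocks until every worker reports a status, then broadcasts the death tag one worker at a time.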
diff --git a/modules/systemPerformance/REWET/REWET/main.py b/modules/systemPerformance/REWET/REWET/main.py
index 39d65a887..7cb8535cb 100644
--- a/modules/systemPerformance/REWET/REWET/main.py
+++ b/modules/systemPerformance/REWET/REWET/main.py
@@ -7,7 +7,7 @@
so one can run initial.py to run REWET. Currently, REWET's GUI still works with
initial.py; main.py is going to be the most developed tool.
-"""
+""" # noqa: D400
import argparse
import os
@@ -16,7 +16,7 @@
from initial import Starter
if __name__ == '__main__':
- argParser = argparse.ArgumentParser(
+ argParser = argparse.ArgumentParser( # noqa: N816
prog='REWET V0.2',
description='REstoration of Water after Event Tool (REWET) is a package for modeling damage and restoration in water networks. You can specify settings by providing a JSON file; an example JSON file is provided in the example folder. Modify the example and provide its path as an input. If no file is provided, the default settings values from input/settings.py will be used, so you can alternatively modify values in settings for a single run.',
)
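The option definitions themselves are elided from this hunk; judging by the parse_namespace.json and parse_namespace.project attributes used below, they are plausibly plain string options. A hypothetical sketch (the flag names are an assumption, not confirmed by the diff):

import argparse

parser = argparse.ArgumentParser(prog='REWET V0.2')
parser.add_argument('--json', default=None, help='path to a settings JSON file (assumed flag name)')
parser.add_argument('--project', default=None, help='path to a REWET .prj project file (assumed flag name)')
ns = parser.parse_args(['--json', 'settings.json'])
print(ns.json, ns.project)  # -> settings.json None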
@@ -33,49 +33,49 @@
# No file is specified, so the default values in the settings file are going
# to be used.
- if parse_namespace.json == None and parse_namespace.project == None:
+ if parse_namespace.json == None and parse_namespace.project == None: # noqa: E711
import warnings
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
starter.run()
sys.exit(0)
- elif parse_namespace.json != None and parse_namespace.project == None:
+ elif parse_namespace.json != None and parse_namespace.project == None: # noqa: E711
if parse_namespace.json.split('.')[-1].upper() != 'JSON':
- print(
+ print( # noqa: T201
'ERROR in json file name: ',
parse_namespace.json,
'The json file must have a .json extension',
)
sys.exit(0)
- elif not os.path.exists(parse_namespace.json):
- print('ERROR in json file: ', parse_namespace.json, 'does not exist')
+ elif not os.path.exists(parse_namespace.json): # noqa: PTH110
+ print('ERROR in json file: ', parse_namespace.json, 'does not exist') # noqa: T201
else:
starter.run(parse_namespace.json)
- elif parse_namespace.json == None and parse_namespace.project != None:
+ elif parse_namespace.json == None and parse_namespace.project != None: # noqa: E711
if parse_namespace.project.split('.')[-1].upper() != 'PRJ':
- print(
+ print( # noqa: T201
'ERROR in project file name: ',
parse_namespace.project,
'The project file must have a .prj extension',
)
sys.exit(0)
- elif not os.path.exists(parse_namespace.project):
- print(
+ elif not os.path.exists(parse_namespace.project): # noqa: PTH110
+ print( # noqa: T201
'ERROR in project file: ', parse_namespace.project, 'does not exist'
)
else:
starter.run(parse_namespace.project)
else:
- print(
+ print( # noqa: T201
'ERROR in arguments\n',
'Either of the json or project file arguments must be used',
)
else:
- print(
+ print( # noqa: T201
'Main file has been run without being the main module (i.e.,\
__name__ is not "__main__")'
)
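Throughout main.py the PR silences E711 (`== None`) and PTH110 (`os.path.exists`) rather than rewriting the code. For reference, a short sketch of the idiomatic equivalents the warnings point at (file names here are illustrative):

from pathlib import Path

json_arg = None
if json_arg is None:                     # E711: prefer "is None" over "== None"
    print('no JSON file given')

candidate = Path('example/settings.json')
if not candidate.exists():               # PTH110: pathlib instead of os.path.exists
    print(f'{candidate} does not exist')
if candidate.suffix.upper() != '.JSON':  # pathlib version of split('.')[-1].upper()
    print('the settings file must have a .json extension')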
diff --git a/modules/systemPerformance/REWET/REWET/repair.py b/modules/systemPerformance/REWET/REWET/repair.py
index 6851cd300..362de0a94 100644
--- a/modules/systemPerformance/REWET/REWET/repair.py
+++ b/modules/systemPerformance/REWET/REWET/repair.py
@@ -1,7 +1,7 @@
"""Created on Tue Feb 2 20:22:09 2021
@author: snaeimi
-"""
+""" # noqa: D400
import math
from collections import OrderedDict
@@ -50,28 +50,28 @@
}
-class Repair:
+class Repair: # noqa: D101
def __init__(self, registry):
self._registry = registry
- def closeSecondLeakingPipe(self, damage_node_name, wn):
+ def closeSecondLeakingPipe(self, damage_node_name, wn): # noqa: N802, D102
if (
- self._registry.getDamageData('PIPE', False).loc[
+ self._registry.getDamageData('PIPE', False).loc[ # noqa: FBT003
damage_node_name, 'damage_type'
]
!= 'leak'
):
raise ValueError('Damage type is not leak in node ' + damage_node_name)
- pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData(
+ pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData( # noqa: N806
damage_node_name
)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
pipe_B.status = LinkStatus.Closed
pipe_B.initial_status = LinkStatus.Closed
- def bypassPipe(
+ def bypassPipe( # noqa: N802, D102
self,
damage_node_name,
middle_pipe_size,
@@ -84,26 +84,26 @@ def bypassPipe(
# raise ValueError('Damage type is not leak in node '+damage_node_name)
if damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData(
+ pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData( # noqa: N806
damage_node_name
)
elif damage_type == 'break':
- pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = ( # noqa: N806
self._registry.getBreakData(damage_node_name)
)
org_pipe_data = self._registry.getOriginalPipenodes(orginal_pipe_name)
- orginal_node_A_name = org_pipe_data['start_node_name']
- orginal_node_B_name = org_pipe_data['end_node_name']
+ orginal_node_A_name = org_pipe_data['start_node_name'] # noqa: N806
+ orginal_node_B_name = org_pipe_data['end_node_name'] # noqa: N806
orginal_pipe_length = org_pipe_data['length']
orginal_pipe_roughness = org_pipe_data['roughness']
- if length != None:
+ if length != None: # noqa: E711
pipe_length = length
else:
pipe_length = orginal_pipe_length
- if friction != None:
+ if friction != None: # noqa: E711
pipe_friction = friction
else:
pipe_friction = orginal_pipe_roughness
@@ -134,11 +134,11 @@ def bypassPipe(
cur_damage_type = cur_damage['damage_type']
if cur_damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name = ( # noqa: N806
self._registry.getLeakData(cur_damage_node_name)
)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806, F841
elif cur_damage_type == 'break':
pass
@@ -151,28 +151,28 @@ def bypassPipe(
)
# local reconnection, for instance, for fire truck reconnection
- def reconnectPipe(self, damage_node_name, middle_pipe_size, damage_type, wn):
+ def reconnectPipe(self, damage_node_name, middle_pipe_size, damage_type, wn): # noqa: N802, D102
history = OrderedDict()
if damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData(
+ pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData( # noqa: N806
damage_node_name
)
- pipe_A = wn.get_link(pipe_A_name)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_A = wn.get_link(pipe_A_name) # noqa: N806
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
if pipe_A.status == 1:
history['NON_COL_PIPE_CLOSED_FROM_OPEN'] = pipe_A_name
- elif pipe_A.status == 3:
+ elif pipe_A.status == 3: # noqa: PLR2004
history['NON_COL_PIPE_CLOSED_FROM_CV'] = pipe_A_name
pipe_A.initial_status = LinkStatus(0)
- if middle_pipe_size == None:
+ if middle_pipe_size == None: # noqa: E711
middle_pipe_size = pipe_A.diameter
- beg_node_of_pipe_A = pipe_A.start_node
- end_node_of_pipe_B = pipe_B.end_node
+ beg_node_of_pipe_A = pipe_A.start_node # noqa: N806
+ end_node_of_pipe_B = pipe_B.end_node # noqa: N806
new_length = pipe_A.length + pipe_B.length
# For the sake of multiple damages in one pipe, the following line is marked and the line after it is added
@@ -190,18 +190,18 @@ def reconnectPipe(self, damage_node_name, middle_pipe_size, damage_type, wn):
history['NON_COL_ADDED_PIPE'] = new_pipe_name
elif damage_type == 'break':
- pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = ( # noqa: N806
self._registry.getBreakData(damage_node_name)
)
- pipe_A = wn.get_link(pipe_A_name)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_A = wn.get_link(pipe_A_name) # noqa: N806
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
- if middle_pipe_size == None:
+ if middle_pipe_size == None: # noqa: E711
middle_pipe_size = pipe_A.diameter
- beg_node_of_pipe_A = pipe_A.start_node
- end_node_of_pipe_B = pipe_B.end_node
+ beg_node_of_pipe_A = pipe_A.start_node # noqa: N806
+ end_node_of_pipe_B = pipe_B.end_node # noqa: N806
new_length = pipe_A.length + pipe_B.length
# For the sake of multiple damages in one pipe, the following line is marked and the line after it is added
@@ -224,7 +224,7 @@ def reconnectPipe(self, damage_node_name, middle_pipe_size, damage_type, wn):
damage_node_name, history, 'reconnect'
)
- def removeLeak(self, damage_node_name, damage_type, wn, factor=1):
+ def removeLeak(self, damage_node_name, damage_type, wn, factor=1): # noqa: C901, N802, D102
history = OrderedDict()
opening = 1 - factor
@@ -242,27 +242,27 @@ def removeLeak(self, damage_node_name, damage_type, wn, factor=1):
cur_damage_type = damage_type_list[cur_damage_node_name]
if cur_damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name = ( # noqa: N806
self._registry.getLeakData(cur_damage_node_name)
)
- node_A = wn.get_node(cur_damage_node_name)
+ node_A = wn.get_node(cur_damage_node_name) # noqa: N806
if pipe_B_name in wn.pipe_name_list:
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
if pipe_B.status == 1:
history['PIPE_CLOSED_FROM_OPEN'] = pipe_B_name
- elif pipe_B.status == 3:
+ elif pipe_B.status == 3: # noqa: PLR2004
history['PIPE_CLOSED_FROM_CV'] = pipe_B_name
pipe_B.initial_status = LinkStatus(0)
- history['NODE_A_DEMAND_BEFORE'] = node_A._leak_area
- node_A_leak_area = opening * node_A._leak_area
+ history['NODE_A_DEMAND_BEFORE'] = node_A._leak_area # noqa: SLF001
+ node_A_leak_area = opening * node_A._leak_area # noqa: SLF001, N806
node_A.add_leak(wn, node_A_leak_area, discharge_coeff=1)
- history['NODE_A_DEMAND_AFTER'] = node_A._leak_area
+ history['NODE_A_DEMAND_AFTER'] = node_A._leak_area # noqa: SLF001
- if abs(opening) < 0.001:
+ if abs(opening) < 0.001: # noqa: PLR2004
node_A.remove_leak(wn)
history['NODE_A'] = 'REMOVED'
else:
@@ -270,11 +270,11 @@ def removeLeak(self, damage_node_name, damage_type, wn, factor=1):
elif cur_damage_type == 'break':
(
- pipe_A_name,
- pipe_B_name,
+ pipe_A_name, # noqa: N806
+ pipe_B_name, # noqa: N806
orginal_pipe_name,
- node_A_name,
- node_B_name,
+ node_A_name, # noqa: N806
+ node_B_name, # noqa: N806
) = self._registry.getBreakData(cur_damage_node_name)
if cur_damage_node_name != node_A_name:
raise ValueError(
@@ -284,30 +284,30 @@ def removeLeak(self, damage_node_name, damage_type, wn, factor=1):
+ repr(node_A_name)
)
- node_A = wn.get_node(cur_damage_node_name)
+ node_A = wn.get_node(cur_damage_node_name) # noqa: N806
- history['NODE_A_DEMAND_BEFORE'] = node_A._leak_area
- node_A_leak_area = opening * node_A._leak_area
+ history['NODE_A_DEMAND_BEFORE'] = node_A._leak_area # noqa: SLF001
+ node_A_leak_area = opening * node_A._leak_area # noqa: SLF001, N806
node_A.add_leak(wn, node_A_leak_area, discharge_coeff=1)
- history['NODE_A_DEMAND_AFTER'] = node_A._leak_area
+ history['NODE_A_DEMAND_AFTER'] = node_A._leak_area # noqa: SLF001
- if abs(opening) < 0.001:
+ if abs(opening) < 0.001: # noqa: PLR2004
node_A.remove_leak(wn)
- node_A._leak_area = 0
+ node_A._leak_area = 0 # noqa: SLF001
history['NODE_A'] = 'REMOVED'
else:
history['NODE_A'] = 'REDUCED'
- node_B = wn.get_node(node_B_name)
+ node_B = wn.get_node(node_B_name) # noqa: N806
- history['NODE_B_DEMAND_BEFORE'] = node_B._leak_area
- node_B_leak_area = opening * node_B._leak_area
+ history['NODE_B_DEMAND_BEFORE'] = node_B._leak_area # noqa: SLF001
+ node_B_leak_area = opening * node_B._leak_area # noqa: SLF001, N806
node_B.add_leak(wn, node_B_leak_area, discharge_coeff=1)
- history['NODE_B_DEMAND_AFTER'] = node_B._leak_area
+ history['NODE_B_DEMAND_AFTER'] = node_B._leak_area # noqa: SLF001
- if abs(opening) < 0.001:
+ if abs(opening) < 0.001: # noqa: PLR2004
node_B.remove_leak(wn)
- node_B._leak_area = 0
+ node_B._leak_area = 0 # noqa: SLF001
history['NODE_B'] = 'REMOVED'
else:
history['NODE_B'] = 'REDUCED'
@@ -319,15 +319,15 @@ def removeLeak(self, damage_node_name, damage_type, wn, factor=1):
damage_node_name, history, 'removeLeak'
)
- def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
+ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn): # noqa: C901, N802, D102
history = OrderedDict()
if damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData(
+ pipe_A_name, pipe_B_name, orginal_pipe_name = self._registry.getLeakData( # noqa: N806
damage_node_name
)
elif damage_type == 'break':
- pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name, node_A_name, node_B_name = ( # noqa: N806
self._registry.getBreakData(damage_node_name)
)
else:
@@ -335,27 +335,27 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
'Unknown damage type in ' + damage_node_name + ', ' + damage_type
)
- pipe_A = wn.get_link(pipe_A_name)
- pipe_B = wn.get_link(pipe_B_name)
- first_node_pipe_A = pipe_A.start_node
- second_node_pipe_B = pipe_B.end_node
+ pipe_A = wn.get_link(pipe_A_name) # noqa: N806
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
+ first_node_pipe_A = pipe_A.start_node # noqa: N806
+ second_node_pipe_B = pipe_B.end_node # noqa: N806
- _coord_A = (
+ _coord_A = ( # noqa: N806
first_node_pipe_A.coordinates[0] + 10,
first_node_pipe_A.coordinates[1] + 10,
)
- new_reservoir_A = first_node_pipe_A.name + '-added'
+ new_reservoir_A = first_node_pipe_A.name + '-added' # noqa: N806
wn.add_reservoir(
new_reservoir_A,
base_head=first_node_pipe_A.elevation,
coordinates=_coord_A,
)
- _coord_B = (
+ _coord_B = ( # noqa: N806
second_node_pipe_B.coordinates[0] + 10,
second_node_pipe_B.coordinates[1] + 10,
)
- new_reservoir_B = second_node_pipe_B.name + '-added'
+ new_reservoir_B = second_node_pipe_B.name + '-added' # noqa: N806
wn.add_reservoir(
new_reservoir_B,
base_head=second_node_pipe_B.elevation,
@@ -364,7 +364,7 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
history['ADDED_RESERVOIR_A'] = new_reservoir_A
history['ADDED_RESERVOIR_B'] = new_reservoir_B
- if _type == None:
+ if _type == None: # noqa: E711
_pipe_size = pipe_A.diameter
new_pipe_name_1 = damage_node_name + '-lK1'
new_pipe_name_2 = damage_node_name + '-lK2'
@@ -424,8 +424,8 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
new_valve_name_1 = damage_node_name + '-RV1'
new_valve_name_2 = damage_node_name + '-RV2'
- new_RP_middle_name1 = damage_node_name + '-mn1'
- new_RP_middle_name2 = damage_node_name + '-mn2'
+ new_RP_middle_name1 = damage_node_name + '-mn1' # noqa: N806
+ new_RP_middle_name2 = damage_node_name + '-mn2' # noqa: N806
coord1 = (
first_node_pipe_A.coordinates[0] + 5,
@@ -482,8 +482,8 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
setting=0.2500,
)
- res_A = wn.get_node(new_reservoir_A)
- res_B = wn.get_node(new_reservoir_B)
+ res_A = wn.get_node(new_reservoir_A) # noqa: N806
+ res_B = wn.get_node(new_reservoir_B) # noqa: N806
res_A.base_head = res_A.base_head + 20
res_B.base_head = res_B.base_head + 20
@@ -508,7 +508,7 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
)
else:
- raise ValueError('Unknown Reservoir type')
+ raise ValueError('Unknown Reservoir type') # noqa: EM101, TRY003
damage_data = self._registry.getDamageData('pipe', iCopy=False)
redefined_damage_data = damage_data[
@@ -518,15 +518,15 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
for cur_damage_node_name, cur_damage in redefined_damage_data.iterrows():
cur_damage_type = cur_damage['damage_type']
if cur_damage_type == 'leak':
- pipe_A_name, pipe_B_name, orginal_pipe_name = (
+ pipe_A_name, pipe_B_name, orginal_pipe_name = ( # noqa: N806
self._registry.getLeakData(cur_damage_node_name)
)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
if pipe_B.status == 1:
history['PIPE_CLOSED_FROM_OPEN'] = pipe_B_name
- elif pipe_B.status == 3:
+ elif pipe_B.status == 3: # noqa: PLR2004
history['PIPE_CLOSED_FROM_CV'] = pipe_B_name
pipe_B.initial_status = LinkStatus(0)
@@ -540,7 +540,7 @@ def addReservoir(self, damage_node_name, damage_type, _type, pump, wn):
damage_node_name, history, 'addReservoir'
)
- def removeDemand(self, node_name, factor, wn):
+ def removeDemand(self, node_name, factor, wn): # noqa: N802, D102
history = OrderedDict()
if factor < 0 or factor > 1:
@@ -562,11 +562,11 @@ def removeDemand(self, node_name, factor, wn):
node.demand_timeseries_list[0].base_value = new_demand
history['NODE_DEMAND_AFTER'] = new_demand
- self._registry.addFunctionDataToRestorationRegistry(
+ self._registry.addFunctionDataToRestorationRegistry( # noqa: RET503
node_name, history, 'removeDemand'
)
- def removeExplicitNodalLeak(self, node_name, factor, wn):
+ def removeExplicitNodalLeak(self, node_name, factor, wn): # noqa: N802, D102
history = OrderedDict()
damage_data = self._registry.getEquavalantDamageHistory(node_name)
pipe_name = damage_data['new_pipe_name']
@@ -589,22 +589,22 @@ def removeExplicitNodalLeak(self, node_name, factor, wn):
node_name, history, 'removeExplicitLeak'
)
- def removeNodeTemporaryRepair(self, damage_node_name, wn):
+ def removeNodeTemporaryRepair(self, damage_node_name, wn): # noqa: N802, D102
if_damage_removed = False
- restoration_table = self._registry._restoration_table
+ restoration_table = self._registry._restoration_table # noqa: SLF001
selected_restoration_table = restoration_table[
restoration_table['node_name'] == damage_node_name
]
- for ind, rec_id in selected_restoration_table.record_index.items():
- change_list = self._registry._record_registry[rec_id]
+ for ind, rec_id in selected_restoration_table.record_index.items(): # noqa: B007, PERF102
+ change_list = self._registry._record_registry[rec_id] # noqa: SLF001
- for change, name in ((k, change_list[k]) for k in reversed(change_list)):
+ for change, name in ((k, change_list[k]) for k in reversed(change_list)): # noqa: B007
if change == 'removeExplicitLeak':
pass
- elif change == 'NODE_DEMAND_AFTER' or change == 'NODE_DEMAND_BEFORE':
+ elif change == 'NODE_DEMAND_AFTER' or change == 'NODE_DEMAND_BEFORE': # noqa: PLR1714
if (
self._registry.settings['damage_node_model']
== 'Predefined_demand'
@@ -619,25 +619,25 @@ def removeNodeTemporaryRepair(self, damage_node_name, wn):
):
self.restoreDistributionOrginalDemand(damage_node_name, wn)
else:
- raise ValueError('unknown method')
+ raise ValueError('unknown method') # noqa: EM101, TRY003
- if if_damage_removed == False:
+ if if_damage_removed == False: # noqa: E712
self.removeDISTNodeExplicitLeak(damage_node_name, wn)
- def removePipeRepair(self, damaged_node_name, wn, action):
- restoration_table = self._registry._restoration_table
+ def removePipeRepair(self, damaged_node_name, wn, action): # noqa: C901, N802, D102
+ restoration_table = self._registry._restoration_table # noqa: SLF001
selected_restoration_table = restoration_table[
restoration_table['node_name'] == damaged_node_name
]
for ind, rec_id in selected_restoration_table.record_index.items():
- change_list = self._registry._record_registry[rec_id]
+ change_list = self._registry._record_registry[rec_id] # noqa: SLF001
to_pop_list = []
for change, name in ((k, change_list[k]) for k in reversed(change_list)):
flag = True
- if change == 'ADDED_PIPE' or change == 'ADDED_PUMP':
+ if change == 'ADDED_PIPE' or change == 'ADDED_PUMP': # noqa: PLR1714
wn.remove_link(name)
i_link_collective = False
@@ -654,19 +654,19 @@ def removePipeRepair(self, damaged_node_name, wn, action):
]
refined_damage_data = damage_data[
(damage_data['Orginal_element'] == orginal_pipe_name)
- & (damage_data['discovered'] == True)
+ & (damage_data['discovered'] == True) # noqa: E712
]
- if (refined_damage_data[action] == True).all():
+ if (refined_damage_data[action] == True).all(): # noqa: E712
if i_link_collective:
if (
- change == 'BYPASS_PIPE'
+ change == 'BYPASS_PIPE' # noqa: PLR1714
or change == 'ADDED_PIPE_A'
or (
- change == 'ADDED_PIPE_B'
+ change == 'ADDED_PIPE_B' # noqa: PLR1714
or change == 'ADDED_PIPE_C'
)
or (
- change == 'ADDED_PIPE_D'
+ change == 'ADDED_PIPE_D' # noqa: PLR1714
or change == 'ADDED_PUMP_A'
or change == 'ADDED_PUMP_B'
)
@@ -718,18 +718,18 @@ def removePipeRepair(self, damaged_node_name, wn, action):
change_list.pop(pop_key)
if len(change_list) == 0:
- restoration_table.drop(ind, inplace=True)
+ restoration_table.drop(ind, inplace=True) # noqa: PD002
- def repairPipe(self, damage_node_name, damage_type, wn):
+ def repairPipe(self, damage_node_name, damage_type, wn): # noqa: N802, D102
if damage_type == 'leak':
- pipe_A_name, pipe_B_name = self._registry.getCertainLeakData(
+ pipe_A_name, pipe_B_name = self._registry.getCertainLeakData( # noqa: N806
damage_node_name, wn
)
- pipe_A = wn.get_link(pipe_A_name)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_A = wn.get_link(pipe_A_name) # noqa: N806
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
- end_node_of_pipe_B = pipe_B.end_node
+ end_node_of_pipe_B = pipe_B.end_node # noqa: N806
new_length = pipe_A.length + pipe_B.length
pipe_A.length = new_length
@@ -739,14 +739,14 @@ def repairPipe(self, damage_node_name, damage_type, wn):
wn.remove_node(damage_node_name, with_control=True)
elif damage_type == 'break':
- pipe_A_name, pipe_B_name, node_A_name, node_B_name = (
+ pipe_A_name, pipe_B_name, node_A_name, node_B_name = ( # noqa: N806
self._registry.getCertainBreakData(damage_node_name, wn)
)
- pipe_A = wn.get_link(pipe_A_name)
- pipe_B = wn.get_link(pipe_B_name)
+ pipe_A = wn.get_link(pipe_A_name) # noqa: N806
+ pipe_B = wn.get_link(pipe_B_name) # noqa: N806
- end_node_of_pipe_B = pipe_B.end_node
+ end_node_of_pipe_B = pipe_B.end_node # noqa: N806
new_length = pipe_A.length + pipe_B.length
pipe_A.length = new_length
@@ -756,11 +756,11 @@ def repairPipe(self, damage_node_name, damage_type, wn):
wn.remove_node(node_A_name, with_control=True)
wn.remove_node(node_B_name, with_control=True)
- def restorePumps(self, pump_name_list, wn):
+ def restorePumps(self, pump_name_list, wn): # noqa: N802, D102
for pump_name in pump_name_list:
wn.get_link(pump_name).initial_status = LinkStatus(1)
- def restoreTanks(self, tank_name_list, wn):
+ def restoreTanks(self, tank_name_list, wn): # noqa: N802, D102
for tank_name in tank_name_list:
made_up_mid_node_name = tank_name + '_tank_mid'
made_up_pipe_name = tank_name + '_tank_mid_pipe'
@@ -781,7 +781,7 @@ def restoreTanks(self, tank_name_list, wn):
wn.remove_node(made_up_mid_node_name, with_control=True)
- def removeDISTNodeIsolation(self, damaged_node_name, wn):
+ def removeDISTNodeIsolation(self, damaged_node_name, wn): # noqa: N802, D102
post_incident_node_demand = self._registry.getDamageData('DISTNODE').loc[
damaged_node_name, 'Demand2'
]
@@ -789,7 +789,7 @@ def removeDISTNodeIsolation(self, damaged_node_name, wn):
node = wn.get_node(damaged_node_name)
node.demand_timeseries_list[0].base_value = post_incident_node_demand
- def restoreDistributionOrginalDemand(self, damaged_node_name, wn):
+ def restoreDistributionOrginalDemand(self, damaged_node_name, wn): # noqa: N802, D102
if self._registry.settings['damage_node_model'] == 'Predefined_demand':
pre_incident_node_demand = self._registry.getDamageData(
'DISTNODE', iCopy=False
@@ -805,12 +805,12 @@ def restoreDistributionOrginalDemand(self, damaged_node_name, wn):
]
pre_incident_node_demand = virtual_nodes_damage_tabel.iloc[0]['Demand1']
else:
- raise ValueError('unknown method')
+ raise ValueError('unknown method') # noqa: EM101, TRY003
node = wn.get_node(damaged_node_name)
node.demand_timeseries_list[0].base_value = pre_incident_node_demand
- def removeDISTNodeExplicitLeak(self, damaged_node_name, wn):
+ def removeDISTNodeExplicitLeak(self, damaged_node_name, wn): # noqa: N802, D102
temp = self._registry.active_nodal_damages
value_key = {v: k for k, v in temp.items()}
_key = value_key[damaged_node_name]
@@ -822,11 +822,11 @@ def removeDISTNodeExplicitLeak(self, damaged_node_name, wn):
wn.remove_link(pipe_name)
wn.remove_node(reservoir_name, with_control=True)
if reservoir_name in wn.node_name_list:
- raise
+ raise # noqa: PLE0704
self._registry.removeEquavalantDamageHistory(damaged_node_name)
- def modifyDISTNodeDemandLinearMode(
+ def modifyDISTNodeDemandLinearMode( # noqa: N802, D102
self,
damage_node_name,
real_node_name,
@@ -846,7 +846,7 @@ def modifyDISTNodeDemandLinearMode(
node = wn.get_node(real_node_name)
node.demand_timeseries_list[0].base_value = new_demand
- def modifyDISTNodeExplicitLeakEmitter(
+ def modifyDISTNodeExplicitLeakEmitter( # noqa: N802, D102
self,
damage_node_name,
real_node_name,
@@ -854,7 +854,7 @@ def modifyDISTNodeExplicitLeakEmitter(
repaired_number,
total_number,
):
- nodal_data = self._registry._nodal_data[real_node_name]
+ nodal_data = self._registry._nodal_data[real_node_name] # noqa: SLF001
pipe_length = nodal_data['pipe_length']
mean_pressure = nodal_data['mean_pressure']
new_node_name = nodal_data['new_node_name']
@@ -871,29 +871,29 @@ def modifyDISTNodeExplicitLeakEmitter(
node = wn.get_node(new_node_name)
# print(real_node_name)
- if cd >= node._emitter_coefficient:
+ if cd >= node._emitter_coefficient: # noqa: SLF001
raise ValueError(
'something wrong here: '
+ repr(cd)
+ ' - '
- + repr(node._emitter_coefficient)
+ + repr(node._emitter_coefficient) # noqa: SLF001
+ ' '
+ str(damage_node_name)
+ ' '
+ str(real_node_name)
)
- node._emitter_coefficient = cd
+ node._emitter_coefficient = cd # noqa: SLF001
- def modifyDISTNodeExplicitLeakReservoir(
+ def modifyDISTNodeExplicitLeakReservoir( # noqa: N802, D102
self,
- damage_node_name,
+ damage_node_name, # noqa: ARG002
real_node_name,
wn,
repaired_number,
total_number,
):
- nodal_data = self._registry._nodal_data[real_node_name]
+ nodal_data = self._registry._nodal_data[real_node_name] # noqa: SLF001
pipe_length = nodal_data['pipe_length']
mean_pressure = nodal_data['mean_pressure']
pipe_name = nodal_data['new_pipe_name']
@@ -907,13 +907,13 @@ def modifyDISTNodeExplicitLeakReservoir(
mean_pressure,
orginal_flow,
)
- node = wn.get_node(real_node_name)
+ node = wn.get_node(real_node_name) # noqa: F841
q = orginal_flow
nd = self._registry.damage.getNd(
mean_pressure, number_of_damages, total_number
)
- equavalant_pipe_diameter = (
+ equavalant_pipe_diameter = ( # noqa: F841
((nd - 1) * q) ** 2 / (0.125 * 9.81 * math.pi**2 * mean_pressure)
) ** (1 / 4) * 1
pipe = wn.get_link(pipe_name)
@@ -921,16 +921,16 @@ def modifyDISTNodeExplicitLeakReservoir(
# raise ValueError("something wrong here: "+repr(equavalant_pipe_diameter)+" - "+repr(pipe.diameter))
pipe.diameter = pipe.diameter / 2
- def modifyDISTNodeExplicitLeak(
+ def modifyDISTNodeExplicitLeak( # noqa: N802, D102
self,
- real_damage_node_name,
+ real_damage_node_name, # noqa: ARG002
virtual_node_name,
wn,
method,
- damaged_number,
+ damaged_number, # noqa: ARG002
):
if method == 'equal_diameter':
emitter_name = self._registry.virtual_node_data[virtual_node_name][
'emitter_node'
]
- node = wn.get_node(emitter_name)
+ node = wn.get_node(emitter_name) # noqa: F841
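removeLeak above shrinks a damaged node's leak area by the un-repaired fraction opening = 1 - factor, and removes the leak entirely once |opening| drops below 0.001, the magic value that the PLR2004 suppressions mark. The arithmetic as a standalone sketch, without WNTR objects (the sample leak area is illustrative):

def scaled_leak_area(current_area: float, factor: float) -> float:
    """Return the leak area left after repairing `factor` of the damage."""
    opening = 1.0 - factor          # fraction of the leak still open
    if abs(opening) < 0.001:        # effectively repaired: remove the leak
        return 0.0
    return opening * current_area   # otherwise shrink the leak area

print(scaled_leak_area(2.5e-4, 0.5))  # half repaired -> 1.25e-04
print(scaled_leak_area(2.5e-4, 1.0))  # fully repaired -> 0.0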
diff --git a/modules/systemPerformance/REWET/REWET/restoration/base.py b/modules/systemPerformance/REWET/REWET/restoration/base.py
index e707a08bd..aeaa77622 100644
--- a/modules/systemPerformance/REWET/REWET/restoration/base.py
+++ b/modules/systemPerformance/REWET/REWET/restoration/base.py
@@ -1,7 +1,7 @@
"""Created on Fri Dec 25 04:00:43 2020
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import copy
import logging
@@ -14,36 +14,36 @@
logger = logging.getLogger(__name__)
-def get_node_name(node_name, table):
+def get_node_name(node_name, table): # noqa: D103
if 'virtual_of' in table.columns:
real_node_name = table.loc[node_name, 'virtual_of']
if (
- real_node_name == None or real_node_name == np.nan
+ real_node_name == None or real_node_name == np.nan # noqa: E711, PLR1714
): # SINA: probably NP.NAN does not work here. Correct it.
real_node_name = node_name
return real_node_name
- else:
+ else: # noqa: RET505
return node_name
-class Coordination:
- def __init__(self, X=None, Y=None, system=None):
+class Coordination: # noqa: D101
+ def __init__(self, X=None, Y=None, system=None): # noqa: N803
self.x = X
self.y = Y
self.system = system
- def set_coord(self, X, Y, system=None):
+ def set_coord(self, X, Y, system=None): # noqa: ARG002, N803, D102
self.x = X
self.y = Y
- def get_coord(self):
+ def get_coord(self): # noqa: D102
return (self.x, self.y)
- def set_system(self, system):
+ def set_system(self, system): # noqa: D102
self.system = system
-class Location:
+class Location: # noqa: D101
def __init__(self, name, x, y):
self.name = name
self.coord = Coordination(x, y)
@@ -59,7 +59,7 @@ def __init__(self, name, x, y):
# =============================================================================
-class AgentData:
+class AgentData: # noqa: D101
def __init__(
self,
agent_name,
@@ -73,19 +73,19 @@ def __init__(
shift_obj,
agent_speed,
):
- if type(agent_type) != str:
- raise ValueError('agent type must be string')
+ if type(agent_type) != str: # noqa: E721
+ raise ValueError('agent type must be string') # noqa: EM101, TRY003
# if type(definition) != pd.Series:
# raise ValueError('definiton must be a Pandas series')
- if type(cur_x) != float:
- raise ValueError('cur_x must be float')
- if type(cur_y) != float:
- raise ValueError('cur_y must be float')
- if type(base_x) != float:
- raise ValueError('base_x must be float')
- if type(base_y) != float:
- raise ValueError('base_y must be float')
+ if type(cur_x) != float: # noqa: E721
+ raise ValueError('cur_x must be float') # noqa: EM101, TRY003
+ if type(cur_y) != float: # noqa: E721
+ raise ValueError('cur_y must be float') # noqa: EM101, TRY003
+ if type(base_x) != float: # noqa: E721
+ raise ValueError('base_x must be float') # noqa: EM101, TRY003
+ if type(base_y) != float: # noqa: E721
+ raise ValueError('base_y must be float') # noqa: EM101, TRY003
self.name = agent_name
self.agent_type = agent_type
@@ -104,7 +104,7 @@ def __init__(
self.cur_job_effect_definition_name = None
self.cur_job_method_name = None
- def isOnShift(self, time):
+ def isOnShift(self, time): # noqa: N802
"""Checks if a time is on an agent's shift
Parameters
@@ -117,11 +117,11 @@ def isOnShift(self, time):
bool
Is true if the time is on the agent's shift.
- """
- shift_name = self.shift._shift_name
+ """ # noqa: D400, D401
+ shift_name = self.shift._shift_name # noqa: SLF001
(time_start, time_finish) = self._shifting.getShiftTimes(shift_name)
- if type(time) != int and type(time) != float:
+ if type(time) != int and type(time) != float: # noqa: E721
raise ValueError('time must be integer: ' + str(type(time)))
time = int(time)
@@ -133,12 +133,12 @@ def isOnShift(self, time):
if time < time_start:
time = time + 24 * 3600
- if time >= time_start and time < time_finish:
+ if time >= time_start and time < time_finish: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
- def getDistanceFromCoordinate(self, destination_coordination):
+ def getDistanceFromCoordinate(self, destination_coordination): # noqa: N802, D102
coord = self.current_location.coord.get_coord()
cur_x = coord[0]
cur_y = coord[1]
@@ -147,37 +147,37 @@ def getDistanceFromCoordinate(self, destination_coordination):
dest_y = destination_coordination[1]
distance = ((cur_x - dest_x) ** 2 + (cur_y - dest_y) ** 2) ** 0.5
- return distance
+ return distance # noqa: RET504
- def _estimateTimeOfArival(self, destination_coordination):
+ def _estimateTimeOfArival(self, destination_coordination): # noqa: N802
distance_with_method_of_choice = self.getDistanceFromCoordinate(
destination_coordination
)
time = distance_with_method_of_choice / self._avg_speed
- return time
+ return time # noqa: RET504
- def getAgentShiftEndTime(self, cur_time):
+ def getAgentShiftEndTime(self, cur_time): # noqa: N802, D102
num_of_days = int(cur_time / (24 * 3600))
- shift_name = self.shift._shift_name
+ shift_name = self.shift._shift_name # noqa: SLF001
(time_start, time_finish) = self._shifting.getShiftTimes(shift_name)
if time_start < time_finish or cur_time % (24 * 3600) <= time_finish:
return time_finish + 24 * 3600 * num_of_days
- else:
+ else: # noqa: RET505
return time_finish + 24 * 3600 * (num_of_days + 1)
- def getShiftLength(self):
- shift_name = self.shift._shift_name
+ def getShiftLength(self): # noqa: N802, D102
+ shift_name = self.shift._shift_name # noqa: SLF001
(time_start, time_finish) = self._shifting.getShiftTimes(shift_name)
if time_start < time_finish:
return time_finish - time_start
- else:
+ else: # noqa: RET505
return 24 * 3600 - time_start + time_finish
- def setJob(
+ def setJob( # noqa: N802, D102
self,
node_name,
action,
@@ -186,10 +186,10 @@ def setJob(
method_name,
time_arival,
time_done,
- iOnGoing,
+ iOnGoing, # noqa: N803
):
- if self.isWorking == True:
- raise ValueError('The current agent is working')
+ if self.isWorking == True: # noqa: E712
+ raise ValueError('The current agent is working') # noqa: EM101, TRY003
self.isWorking = True
self.cur_job_location = node_name
@@ -202,7 +202,7 @@ def setJob(
self.job_end_time = time_done
-class Agents:
+class Agents: # noqa: D101
def __init__(self, registry, shifting, jobs, restoration_log_book):
# data: is the
# type: agent type
@@ -218,7 +218,7 @@ def __init__(self, registry, shifting, jobs, restoration_log_book):
self.restoration_log_book = restoration_log_book
self.registry = registry
- def addAgent(self, agent_name, agent_type, definition):
+ def addAgent(self, agent_name, agent_type, definition): # noqa: N802
"""Adds agent to the agent list
Parameters
@@ -232,7 +232,7 @@ def addAgent(self, agent_name, agent_type, definition):
-------
None.
- """
+ """ # noqa: D400, D401
# number_of_agents = int(definition['Number'])
agent_speed = self.registry.settings['crew_travel_speed']
temp_agent_data = AgentData(
@@ -258,7 +258,7 @@ def addAgent(self, agent_name, agent_type, definition):
if agent_type not in self.group_names:
self.group_names[agent_type] = definition['group_name']
- def setActiveAgents(self, active_agent_ID_list):
+ def setActiveAgents(self, active_agent_ID_list): # noqa: N802, N803
"""Set agents active by a list of agents' ID
Parameters
@@ -270,11 +270,11 @@ def setActiveAgents(self, active_agent_ID_list):
-------
None.
- """
- for active_agent_ID in active_agent_ID_list:
+ """ # noqa: D400
+ for active_agent_ID in active_agent_ID_list: # noqa: N806
self._agents['active'].loc[active_agent_ID] = True
- def getAgentGroupTagList(self, typed_ready_agent):
+ def getAgentGroupTagList(self, typed_ready_agent): # noqa: N802, D102
ret = [None]
agent_type = typed_ready_agent['type'].iloc[0]
if 'group' in typed_ready_agent:
@@ -288,10 +288,10 @@ def getAgentGroupTagList(self, typed_ready_agent):
raise RuntimeError('None in agent type: ' + repr(agent_type))
return ret, self.group_names[agent_type]
- def getAllAgentTypes(self):
+ def getAllAgentTypes(self): # noqa: N802, D102
return self._agents['type'].unique().tolist()
- def getAllAgent(self):
+ def getAllAgent(self): # noqa: N802
"""Get a copy of all agent dataframe.
Returns
@@ -301,52 +301,52 @@ def getAllAgent(self):
"""
return self._agents.copy(deep=True)
- def setChangeShift(self, time, working_check=True):
- for name, agent in self._agents.iterrows():
+ def setChangeShift(self, time, working_check=True): # noqa: FBT002, ARG002, N802, D102
+ for name, agent in self._agents.iterrows(): # noqa: B007
if self._agents.loc[name, 'data'].isOnShift(time):
if (
- self._agents.loc[name, 'active'] == False
+ self._agents.loc[name, 'active'] == False # noqa: E712
): # if agent is active already and is on shift, it means that the agent has been active before the shift change event
- if self._agents.loc[name, 'available'] == True:
+ if self._agents.loc[name, 'available'] == True: # noqa: E712
self._agents.loc[name, 'active'] = True
self._agents.loc[name, 'ready'] = True
else:
if (
- self._agents.loc[name, 'ready'] == True
- and self._agents.loc[name, 'data'].isWorking == True
+ self._agents.loc[name, 'ready'] == True # noqa: E712
+ and self._agents.loc[name, 'data'].isWorking == True # noqa: E712
):
raise RuntimeError(name + ' is working')
self._agents.loc[name, 'active'] = False
self._agents.loc[name, 'ready'] = False
- def initializeActiveAgents(self, time):
- for name, agent in self._agents.iterrows():
+ def initializeActiveAgents(self, time): # noqa: N802, D102
+ for name, agent in self._agents.iterrows(): # noqa: B007
if self._agents.loc[name, 'data'].isOnShift(time):
self._agents.loc[name, 'active'] = True
else:
self._agents.loc[name, 'active'] = False
- def initializeReadyAgents(self):
- for name, agent in self._agents.iterrows():
- if self._agents.loc[name, 'active'] == True:
+ def initializeReadyAgents(self): # noqa: N802, D102
+ for name, agent in self._agents.iterrows(): # noqa: B007
+ if self._agents.loc[name, 'active'] == True: # noqa: E712
self._agents.loc[name, 'ready'] = True
else:
self._agents.loc[name, 'ready'] = False
- def getReadyAgents(self):
+ def getReadyAgents(self): # noqa: N802, D102
temp = self._agents[
- (self._agents['ready'] == True) & (self._agents['available'] == True)
+ (self._agents['ready'] == True) & (self._agents['available'] == True) # noqa: E712
]
check_temp = temp['active'].all()
- if check_temp == False:
- print(temp[temp['active'] == False])
- raise ValueError('At least one agent is ready although it is not on shift')
+ if check_temp == False: # noqa: E712
+ print(temp[temp['active'] == False]) # noqa: T201, E712
+ raise ValueError('At least one agent is ready although it is not on shift') # noqa: EM101, TRY003
return temp
- def getAvailabilityRatio(self, agent_type, time):
- if agent_type == 'WQOperator' or agent_type == 'WQWorker':
+ def getAvailabilityRatio(self, agent_type, time): # noqa: N802, D102
+ if agent_type == 'WQOperator' or agent_type == 'WQWorker': # noqa: PLR1714
av_data = pd.Series(data=[0, 0.5, 1], index=[0, 4, 24])
elif agent_type == 'CONT':
av_data = pd.Series(data=[0, 0, 1], index=[0, 48, 49])
@@ -358,19 +358,19 @@ def getAvailabilityRatio(self, agent_type, time):
return temp[time]
if time > temp.index.max():
return temp[temp.index.max()]
- else:
+ else: # noqa: RET505
temp[time] = np.nan
- temp.sort_index(inplace=True)
- temp.interpolate(method='index', inplace=True)
+ temp.sort_index(inplace=True) # noqa: PD002
+ temp.interpolate(method='index', inplace=True) # noqa: PD002
return temp[time]
- def getDefaultAvailabilityRatio(agent_type, self):
- if agent_type == 'WQOperator' or agent_type == 'WQWorker':
+ def getDefaultAvailabilityRatio(agent_type, self): # noqa: ARG002, N802, N805, D102
+ if agent_type == 'WQOperator' or agent_type == 'WQWorker': # noqa: PLR1714
return 0
- else:
+ else: # noqa: RET505
return 1
- def assignsJobToAgent(
+ def assignsJobToAgent( # noqa: C901, N802, D102
self,
agent_name,
node_name,
@@ -382,21 +382,21 @@ def assignsJobToAgent(
number_of_damages,
orginal_element,
):
- if self._agents.loc[agent_name, 'active'] != True:
+ if self._agents.loc[agent_name, 'active'] != True: # noqa: E712
raise ValueError('Agent ' + agent_name + ' is not active')
- if self._agents.loc[agent_name, 'ready'] != True:
+ if self._agents.loc[agent_name, 'ready'] != True: # noqa: E712
raise ValueError('Agent ' + agent_name + ' is not ready')
- if self._agents.loc[agent_name, 'data'].isOnShift(time) != True:
+ if self._agents.loc[agent_name, 'data'].isOnShift(time) != True: # noqa: E712
raise ValueError('Agent ' + agent_name + ' is not on shift')
- if self._agents.loc[agent_name, 'data'].isWorking == True:
+ if self._agents.loc[agent_name, 'data'].isWorking == True: # noqa: E712
raise ValueError('Agent ' + agent_name + ' is working')
# logger.debug('Assiging job to '+agent_name)
real_node_name = node_name
- if self._jobs._rm.entity[entity] == 'DISTNODE':
- damage_data = self._jobs._rm._registry.getDamageData(
+ if self._jobs._rm.entity[entity] == 'DISTNODE': # noqa: SLF001
+ damage_data = self._jobs._rm._registry.getDamageData( # noqa: SLF001
'DISTNODE', iCopy=False
)
if 'virtual_of' in damage_data.columns:
@@ -405,7 +405,7 @@ def assignsJobToAgent(
coord = wn.get_node(real_node_name).coordinates
agent_type = self._agents.loc[agent_name, 'type']
- _ETA = self._agents.loc[agent_name, 'data']._estimateTimeOfArival(coord)
+ _ETA = self._agents.loc[agent_name, 'data']._estimateTimeOfArival(coord) # noqa: SLF001, N806
effect_definition_name = self._jobs.getEffectDefinitionName(
agent_type, action, entity
)
@@ -413,13 +413,13 @@ def assignsJobToAgent(
node_name, effect_definition_name, entity
)
- if method_name == None:
+ if method_name == None: # noqa: E711
raise ValueError(
'No method is applicable for ' + repr(effect_definition_name)
)
- if reminded_time == None:
- _ETJ = self._jobs.getAJobEstimate(
+ if reminded_time == None: # noqa: E711
+ _ETJ = self._jobs.getAJobEstimate( # noqa: N806
orginal_element,
agent_type,
entity,
@@ -428,12 +428,12 @@ def assignsJobToAgent(
number_of_damages,
)
else:
- _ETJ = int(reminded_time)
+ _ETJ = int(reminded_time) # noqa: N806
if reminded_time < 0:
raise ValueError('Something wrong here: ' + repr(reminded_time))
if effect_definition_name != 'CHECK':
- method_line = self._jobs._effect_data[effect_definition_name][
+ method_line = self._jobs._effect_data[effect_definition_name][ # noqa: SLF001
method_name
]
else:
@@ -444,7 +444,7 @@ def assignsJobToAgent(
collective = None
if 'SKIP' in effects_only:
return (False, 'SKIP', None, collective)
- elif 'FASTCHECK' in effects_only:
+ elif 'FASTCHECK' in effects_only: # noqa: RET505
return (False, 'FASTCHECK', None, collective)
elif 'RECONNECT' in effects_only:
collective = 'BYPASS'
@@ -456,15 +456,15 @@ def assignsJobToAgent(
collective = 'ISOLATE_DN'
if _ETA < 0 or _ETJ <= 0:
- print(
+ print( # noqa: T201
str(_ETA)
+ ' '
+ str(effect_definition_name)
+ ' '
+ str(orginal_element)
)
- print(str(method_name) + ' ' + str(_ETJ))
- raise ValueError('Negative ETA or non-positive ETJ')
+ print(str(method_name) + ' ' + str(_ETJ)) # noqa: T201
+ raise ValueError('Negative ETA or non-positive ETJ') # noqa: EM101, TRY003
end_time = time + _ETA + _ETJ
agent_shift_change_time = self._agents.loc[
@@ -472,17 +472,17 @@ def assignsJobToAgent(
].getAgentShiftEndTime(time)
shift_length = self._agents.loc[agent_name, 'data'].getShiftLength()
- minimum_job_time = self._jobs._rm._registry.settings['minimum_job_time']
+ minimum_job_time = self._jobs._rm._registry.settings['minimum_job_time'] # noqa: SLF001, F841
if end_time <= agent_shift_change_time:
iget = 'INSIDE_SHIFT'
- iOnGoing = False
+ iOnGoing = False # noqa: N806
elif (
end_time > agent_shift_change_time
and (shift_length - 2 * 3600) < _ETJ
and (time + _ETA + 2 * 3600) < agent_shift_change_time
):
iget = 'OUTSIDE_SHIFT'
- iOnGoing = True
+ iOnGoing = True # noqa: N806
else:
# logger.warning(agent_name+', '+node_name+', '+repr(end_time))
iget = 'ShortOfTime'
@@ -518,34 +518,34 @@ def assignsJobToAgent(
return (True, iget, _ETJ, collective)
- def getJobEndTime(self, agent_name, icheck=True):
+ def getJobEndTime(self, agent_name, icheck=True): # noqa: FBT002, N802, D102
end_time = self._agents.loc[agent_name, 'data'].job_end_time
- if icheck == True and end_time == None:
- raise ValueError('No time is assigned to the agent')
+ if icheck == True and end_time == None: # noqa: E711, E712
+ raise ValueError('No time is assigned to the agent') # noqa: EM101, TRY003
if (
- icheck == True
- and self._agents.loc[agent_name, 'data'].isWorking == False
+ icheck == True # noqa: E712
+ and self._agents.loc[agent_name, 'data'].isWorking == False # noqa: E712
):
- raise ValueError('The agent is not working')
+ raise ValueError('The agent is not working') # noqa: EM101, TRY003
return end_time
- def getJobArivalTime(self, agent_name, icheck=True):
- arival_time = self._agents.loc[agent_name, 'data']._time_of_arival
- if icheck == True and arival_time == None:
- raise ValueError('No time is assigned to the agent')
+ def getJobArivalTime(self, agent_name, icheck=True): # noqa: FBT002, N802, D102
+ arival_time = self._agents.loc[agent_name, 'data']._time_of_arival # noqa: SLF001
+ if icheck == True and arival_time == None: # noqa: E711, E712
+ raise ValueError('No time is assigned to the agent') # noqa: EM101, TRY003
if (
- icheck == True
- and self._agents.loc[agent_name, 'data'].isWorking == False
+ icheck == True # noqa: E712
+ and self._agents.loc[agent_name, 'data'].isWorking == False # noqa: E712
):
- raise ValueError('The agent is not working')
+ raise ValueError('The agent is not working') # noqa: EM101, TRY003
return arival_time
- def releaseAgent(self, agent_name):
- if self._agents.loc[agent_name, 'ready'] == True:
+ def releaseAgent(self, agent_name): # noqa: N802, D102
+ if self._agents.loc[agent_name, 'ready'] == True: # noqa: E712
raise ValueError(agent_name + ' is already ready')
- if self._agents.loc[agent_name, 'active'] != True:
+ if self._agents.loc[agent_name, 'active'] != True: # noqa: E712
raise ValueError(agent_name + ' is not active')
- if self._agents.loc[agent_name, 'data'].isWorking == False:
+ if self._agents.loc[agent_name, 'data'].isWorking == False: # noqa: E712
raise ValueError(agent_name + ' is not working')
self._agents.loc[agent_name, 'ready'] = True
@@ -554,14 +554,14 @@ def releaseAgent(self, agent_name):
self._agents.loc[agent_name, 'data'].cur_job_location = None
self._agents.loc[agent_name, 'data'].cur_job_action = None
self._agents.loc[agent_name, 'data'].cur_job_entity = None
- self._agents.loc[agent_name, 'data']._time_of_arival = None
+ self._agents.loc[agent_name, 'data']._time_of_arival = None # noqa: SLF001
self._agents.loc[agent_name, 'data'].cur_job_effect_definition_name = None
self._agents.loc[agent_name, 'data'].cur_job_method_name = None
self._agents.loc[agent_name, 'data'].job_end_time = None
self._agents.loc[agent_name, 'data'].cur_job_ongoing = None
-class AgentShift:
+class AgentShift: # noqa: D101
# , shifting_obj):
def __init__(self, agent_name, name):
self._agent_name = agent_name
@@ -569,12 +569,12 @@ def __init__(self, agent_name, name):
# shifting_obj.addAgentShift(self._agent_name, self._shift_name)
-class Shifting:
+class Shifting: # noqa: D101
def __init__(self):
self._all_agent_shift_data = {}
self._shift_data = pd.DataFrame(columns=['begining', 'end'])
- def addShift(self, name, beginning, ending):
+ def addShift(self, name, beginning, ending): # noqa: N802
"""Adds a shift to shift registry
Parameters
@@ -600,14 +600,14 @@ def addShift(self, name, beginning, ending):
-------
None.
- """
+ """ # noqa: D400, D401
if name in self._shift_data:
- raise ValueError('Shift name already registered')
- if type(beginning) != int and type(beginning) != float:
+ raise ValueError('Shift name already registered') # noqa: EM101, TRY003
+ if type(beginning) != int and type(beginning) != float: # noqa: E721
raise ValueError(
'Beginning time must be integer: ' + str(type(beginning))
)
- if type(ending) != int and type(ending) != float:
+ if type(ending) != int and type(ending) != float: # noqa: E721
raise ValueError('Ending time must be integer: ' + str(type(ending)))
if beginning > 24 * 3600:
raise ValueError(
@@ -620,13 +620,13 @@ def addShift(self, name, beginning, ending):
self._shift_data.loc[name] = [beginning, ending]
- def getShiftTimes(self, name):
+ def getShiftTimes(self, name): # noqa: N802, D102
return (
self._shift_data['begining'].loc[name],
self._shift_data['end'].loc[name],
)
- def getNextShiftTime(self, time):
+ def getNextShiftTime(self, time): # noqa: N802, D102
daily_time = time % (24 * 3600)
num_of_days = int(time / (24 * 3600))
@@ -651,9 +651,9 @@ def getNextShiftTime(self, time):
# next_shift_time = time +(change_shift_time - daily_time)
# else:
- return change_shift_time
+ return change_shift_time # noqa: RET504
- def assignShiftToAgent(self, agent_ID, shift_name):
+ def assignShiftToAgent(self, agent_ID, shift_name): # noqa: N802, N803
"""Assigns shift to agent
Parameters
@@ -673,16 +673,16 @@ def assignShiftToAgent(self, agent_ID, shift_name):
-------
None.
- """
+ """ # noqa: D400, D401
if agent_ID in self._all_agent_shift_data:
- raise ValueError('The agent ID is already in Agent All Shifts')
+ raise ValueError('The agent ID is already in Agent All Shifts') # noqa: EM101, TRY003
if shift_name not in self._shift_data:
- raise ValueError('shift name is not registered as a shift')
+ raise ValueError('shift name is not registered as a shift') # noqa: EM101, TRY003
self._all_agent_shift_data[agent_ID] = shift_name
-class DispatchRule:
+class DispatchRule: # noqa: D101
def __init__(self, settings, method='deterministic', exclude=None):
self.settings = settings
self._rules = {}
@@ -721,10 +721,10 @@ def __init__(self, settings, method='deterministic', exclude=None):
# for key in exclude:
# self._rules.pop(key)
- for key, d in self._rules.items():
+ for key, d in self._rules.items(): # noqa: B007, PERF102
self._cumulative[key] = self._rules[key].cumsum()
- def getDiscoveredPrecentage(self, time):
+ def getDiscoveredPrecentage(self, time): # noqa: N802, D102
res = {}
for key in self._cumulative:
temp = self._cumulative[key].copy()
@@ -736,13 +736,13 @@ def getDiscoveredPrecentage(self, time):
res[key] = temp[temp.index.max()]
else:
temp[time] = np.nan
- temp.sort_index(inplace=True)
- temp.interpolate(method='index', inplace=True)
+ temp.sort_index(inplace=True) # noqa: PD002
+ temp.interpolate(method='index', inplace=True) # noqa: PD002
res[key] = temp[time]
return res
-class Dispatch:
+class Dispatch: # noqa: D101
def __init__(self, restoration, settings, discovery_interval=0, method='old'):
self.settings = settings
self.method = method
@@ -780,12 +780,12 @@ def __init__(self, restoration, settings, discovery_interval=0, method='old'):
continue
self._last_discovered_number[el] = 0
- self._rm._registry.addAttrToPipeDamageTable('discovered', False)
- self._rm._registry.addAttrToDistNodeDamageTable('discovered', False)
+ self._rm._registry.addAttrToPipeDamageTable('discovered', False) # noqa: FBT003, SLF001
+ self._rm._registry.addAttrToDistNodeDamageTable('discovered', False) # noqa: FBT003, SLF001
- def updateDiscovery(self, time):
+ def updateDiscovery(self, time): # noqa: C901, N802, D102
if time < self._rm.restoration_start_time:
- print('Time is less than init time')
+ print('Time is less than init time') # noqa: T201
else:
# if self.method == 'old':
@@ -805,30 +805,30 @@ def updateDiscovery(self, time):
'leak_time'
]
- pipe_damage_table = self._rm._registry._pipe_damage_table
+ pipe_damage_table = self._rm._registry._pipe_damage_table # noqa: SLF001
not_discovered_pipe_damage_table = pipe_damage_table[
- pipe_damage_table['discovered'] == False
+ pipe_damage_table['discovered'] == False # noqa: E712
]
to_be_checked_node_list = list(
not_discovered_pipe_damage_table.index
)
breaks_not_discovered_pipe_damage_table = pipe_damage_table[
- (pipe_damage_table['discovered'] == False)
+ (pipe_damage_table['discovered'] == False) # noqa: E712
& (pipe_damage_table['damage_type'] == 'break')
]
- not_discovered_break_node_B = (
- self._rm._registry._pipe_break_history.loc[
+ not_discovered_break_node_B = ( # noqa: N806
+ self._rm._registry._pipe_break_history.loc[ # noqa: SLF001
breaks_not_discovered_pipe_damage_table.index, 'Node_B'
]
)
- not_dicovered_node_B_list = not_discovered_break_node_B.to_list()
+ not_dicovered_node_B_list = not_discovered_break_node_B.to_list() # noqa: N806
to_be_checked_node_list.extend(not_dicovered_node_B_list)
# break_pair = zip(breaks_not_discovered_pipe_damage_table, not_discovered_break_node_B)
# not_discovered_pipe_damage_name_list = list(not_discovered_pipe_damage_table.index)
# breaks_not_discovered_pipe_damage_table
# all_nodes_name_list = set(self._rm._registry.result.columns)
available_nodes = set(
- self._rm._registry.result.node['demand'].columns
+ self._rm._registry.result.node['demand'].columns # noqa: SLF001
)
to_be_checked_node_list = set(to_be_checked_node_list)
shared_nodes_name_list = (
@@ -837,7 +837,7 @@ def updateDiscovery(self, time):
- (to_be_checked_node_list - available_nodes)
)
if len(shared_nodes_name_list) > 0:
- leaking_nodes_result = self._rm._registry.result.node['demand'][
+ leaking_nodes_result = self._rm._registry.result.node['demand'][ # noqa: SLF001
list(shared_nodes_name_list)
]
@@ -847,7 +847,7 @@ def updateDiscovery(self, time):
discovered_bool = leaking_nodes_result >= pipe_leak_criteria
discovered_bool_temp = discovered_bool.any()
discovered_bool_temp = discovered_bool_temp[
- discovered_bool_temp == True
+ discovered_bool_temp == True # noqa: E712
]
to_be_discoverd = discovered_bool_temp.index.to_list()
@@ -864,7 +864,7 @@ def updateDiscovery(self, time):
# to_be_discoverd = list(to_be_discoverd.index)
for discovery_candidate in to_be_discoverd:
if discovery_candidate in not_dicovered_node_B_list:
- candidate_break_A = not_discovered_break_node_B[
+ candidate_break_A = not_discovered_break_node_B[ # noqa: N806
not_discovered_break_node_B == discovery_candidate
].index[0]
discovery_list.add(candidate_break_A)
@@ -884,9 +884,9 @@ def updateDiscovery(self, time):
'leak_time'
]
- nodal_damage_table = self._rm._registry._node_damage_table
+ nodal_damage_table = self._rm._registry._node_damage_table # noqa: SLF001
not_discovered_nodal_damage_table = nodal_damage_table[
- nodal_damage_table['discovered'] == False
+ nodal_damage_table['discovered'] == False # noqa: E712
]
if 'virtual_of' in not_discovered_nodal_damage_table.columns:
to_be_checked_node_list = list(
@@ -897,7 +897,7 @@ def updateDiscovery(self, time):
not_discovered_nodal_damage_table.index
)
available_leak_nodes = set(
- self._rm._registry.result.node['leak'].columns
+ self._rm._registry.result.node['leak'].columns # noqa: SLF001
)
to_be_checked_node_list = set(to_be_checked_node_list)
shared_nodes_name_list = (
@@ -907,7 +907,7 @@ def updateDiscovery(self, time):
)
if len(shared_nodes_name_list) > 0:
shared_nodes_name_list = list(shared_nodes_name_list)
- leaking_nodes_result = self._rm._registry.result.node['leak'][
+ leaking_nodes_result = self._rm._registry.result.node['leak'][ # noqa: SLF001
shared_nodes_name_list
]
leaking_nodes_result = leaking_nodes_result.sort_index()
@@ -936,7 +936,7 @@ def updateDiscovery(self, time):
)
discovered_bool_temp = discovered_bool.any()
discovered_bool_temp = discovered_bool_temp[
- discovered_bool_temp == True
+ discovered_bool_temp == True # noqa: E712
]
discovered_list = discovered_bool_temp.index.to_list()
if 'virtual_of' in not_discovered_nodal_damage_table.columns:
@@ -961,7 +961,7 @@ def updateDiscovery(self, time):
# else:
# raise ValueError('Unknown method: '+repr(self.method))
- def _getDamageNumbers(self, discovered_ratios):
+ def _getDamageNumbers(self, discovered_ratios): # noqa: N802
num_damaged_entity = {}
for el in discovered_ratios:
@@ -973,21 +973,21 @@ def _getDamageNumbers(self, discovered_ratios):
+ ' in element = '
+ el
)
- else:
+ else: # noqa: RET506
discovered_ratios[el] = 1
- temp = len(self._rm._registry.getDamageData(el))
+ temp = len(self._rm._registry.getDamageData(el)) # noqa: SLF001
num_damaged_entity[el] = int(np.round(temp * discovered_ratios[el]))
return num_damaged_entity
- def _updateDamagesNumbers(self, discovered_numbers):
+ def _updateDamagesNumbers(self, discovered_numbers): # noqa: N802
for el in discovered_numbers:
if self._last_discovered_number[el] > discovered_numbers[el]:
raise ValueError(
'Discovered number is less than what it used to be in element '
+ el
)
- elif self._last_discovered_number[el] < discovered_numbers[el]:
- refined_damaged_table = self._rm._registry.getDamageData(el)
+ elif self._last_discovered_number[el] < discovered_numbers[el]: # noqa: RET506
+ refined_damaged_table = self._rm._registry.getDamageData(el) # noqa: SLF001
if len(refined_damaged_table) < discovered_numbers[el]:
raise ValueError(
'Discovered number is bigger than all damages in element'
@@ -995,12 +995,12 @@ def _updateDamagesNumbers(self, discovered_numbers):
)
discovered_damage_table = refined_damaged_table[
- refined_damaged_table['discovered'] == True
+ refined_damaged_table['discovered'] == True # noqa: E712
]
if discovered_numbers[el] <= len(discovered_damage_table):
continue
undiscovered_damage_table = refined_damaged_table[
- refined_damaged_table['discovered'] == False
+ refined_damaged_table['discovered'] == False # noqa: E712
]
# =============================================================================
@@ -1023,28 +1023,28 @@ def _updateDamagesNumbers(self, discovered_numbers):
used_number = []
for i in used_number:
temp_index = undiscovered_damage_table.index[i]
- self._rm._registry.updateElementDamageTable(
+ self._rm._registry.updateElementDamageTable( # noqa: SLF001
el,
'discovered',
temp_index,
- True,
+ True, # noqa: FBT003
icheck=True,
)
if el == 'PIPE':
- refined_damaged_table = self._rm._registry.getDamageData(el)
+ refined_damaged_table = self._rm._registry.getDamageData(el) # noqa: SLF001
discovered_damage_table = refined_damaged_table[
- refined_damaged_table['discovered'] == True
+ refined_damaged_table['discovered'] == True # noqa: E712
]
self._last_discovered_number[el] = discovered_numbers[el]
-class Priority:
+class Priority: # noqa: D101
def __init__(self, restoration):
self._data = {}
self._rm = restoration
- def addData(self, agent_type, priority, order):
+ def addData(self, agent_type, priority, order): # noqa: N802, D102
if agent_type not in self._data:
self._data[agent_type] = pd.Series(index=[priority], data=[order])
else:
@@ -1058,7 +1058,7 @@ def addData(self, agent_type, priority, order):
)
self._data[agent_type].loc[priority] = order
- def getPriority(self, agent_type, priority):
+ def getPriority(self, agent_type, priority): # noqa: N802, D102
if agent_type not in self._data:
raise ValueError(
'The agent type('
@@ -1079,7 +1079,7 @@ def getPriority(self, agent_type, priority):
return temp.loc[priority]
- def getHydSigDamageGroups(self):
+ def getHydSigDamageGroups(self): # noqa: N802, D102
damage_group_list = set()
for crew_type in self._data:
whole_priority_list = self._data[crew_type]
@@ -1090,10 +1090,10 @@ def getHydSigDamageGroups(self):
if cur_second_priority.upper() == 'HYDSIG':
cur_damage_group = primary_priority_list[i][1]
damage_group_list.add(cur_damage_group)
- i += 1
+ i += 1 # noqa: SIM113
return damage_group_list
- def sortDamageTable(
+ def sortDamageTable( # noqa: C901, N802, D102
self,
wn,
entity_data,
@@ -1111,14 +1111,14 @@ def sortDamageTable(
name_sugest = 'Priority_' + str(target_priority_index) + '_dist'
- if target_priority == None:
+ if target_priority == None: # noqa: E711
target_priority = target_priority_list[order_index]
- if target_priority == None:
+ if target_priority == None: # noqa: E711
return entity_data
- elif target_priority in self._rm.proximity_points:
- Proximity_list = self._rm.proximity_points[target_priority]
+ elif target_priority in self._rm.proximity_points: # noqa: RET505
+ Proximity_list = self._rm.proximity_points[target_priority] # noqa: N806
node_name_list = list(entity_data.index)
for node_name in node_name_list:
# Sina: you can improve the runtime by keeping x, y coordinates in the damage table instead of producing and dropping them each time
@@ -1139,11 +1139,11 @@ def sortDamageTable(
dist_only_entity_table = entity_data[columns_to_drop]
min_dist_entity_table = dist_only_entity_table.min(axis=1)
entity_data.loc[:, name_sugest] = min_dist_entity_table
- entity_data.sort_values(by=name_sugest, ascending=True, inplace=True)
+ entity_data.sort_values(by=name_sugest, ascending=True, inplace=True) # noqa: PD002
columns_to_drop.append(name_sugest)
columns_to_drop.append('X_COORD')
columns_to_drop.append('Y_COORD')
- entity_data.drop(columns=columns_to_drop, inplace=True)
+ entity_data.drop(columns=columns_to_drop, inplace=True) # noqa: PD002
# Sina: It does nothing. When there are fewer damage locations within
# the priority definition for the crew type, this works fine, but
@@ -1164,14 +1164,14 @@ def sortDamageTable(
target_priority='CLOSEST',
)
else:
- all_time_index = self._rm._registry.result.link[
+ all_time_index = self._rm._registry.result.link[ # noqa: SLF001
'flowrate'
].index[: self._rm.restoration_start_time + 1]
pipe_name_list = entity_data.loc[:, 'Orginal_element']
last_valid_time = [
cur_time
for cur_time in all_time_index
- if cur_time not in self._rm._registry.result.maximum_trial_time
+ if cur_time not in self._rm._registry.result.maximum_trial_time # noqa: SLF001
]
last_valid_time.sort()
if len(last_valid_time) == 0:
@@ -1181,20 +1181,20 @@ def sortDamageTable(
name_sugest = 'Priority_' + str(target_priority_index) + '_dist'
flow_rate = (
- self._rm._registry.result.link['flowrate']
+ self._rm._registry.result.link['flowrate'] # noqa: SLF001
.loc[last_valid_time, pipe_name_list]
.abs()
)
entity_data.loc[:, name_sugest] = flow_rate.to_list()
- entity_data.sort_values(name_sugest, ascending=False, inplace=True)
- entity_data.drop(columns=name_sugest, inplace=True)
+ entity_data.sort_values(name_sugest, ascending=False, inplace=True) # noqa: PD002
+ entity_data.drop(columns=name_sugest, inplace=True) # noqa: PD002
elif (
target_priority in self._rm.proximity_points
and target_priority != 'WaterSource2'
):
- all_node_table = self._rm._registry.all_node_table
- Proximity_list = self._rm.proximity_points[target_priority]
+ all_node_table = self._rm._registry.all_node_table # noqa: SLF001
+ Proximity_list = self._rm.proximity_points[target_priority] # noqa: N806
node_name_list = list(entity_data.index)
for node_name in node_name_list:
# Sina: you can improve the runtime by keeping x, y coordinates in the damage table instead of producing and dropping them each time
@@ -1232,7 +1232,7 @@ def sortDamageTable(
length / np.power(d, 4.8655) / np.power(roughness, 1.852)
+ 1 / d
)
- except:
+ except: # noqa: E722
cost = 0.00001
weight = cost
@@ -1254,7 +1254,7 @@ def sortDamageTable(
orginal_pipe_name_list = entity_data['Orginal_element']
damaged_pipe_node_list = [
- self._rm._registry.undamaged_link_node_list[link_node_names]
+ self._rm._registry.undamaged_link_node_list[link_node_names] # noqa: SLF001
for link_node_names in orginal_pipe_name_list
]
try:
@@ -1302,11 +1302,11 @@ def sortDamageTable(
dist_only_entity_table = entity_data[columns_to_drop]
min_dist_entity_table = dist_only_entity_table.min(axis=1)
entity_data.loc[:, name_sugest] = min_dist_entity_table
- entity_data.sort_values(by=name_sugest, ascending=True, inplace=True)
+ entity_data.sort_values(by=name_sugest, ascending=True, inplace=True) # noqa: PD002
columns_to_drop.append(name_sugest)
columns_to_drop.append('X_COORD')
columns_to_drop.append('Y_COORD')
- entity_data.drop(columns=columns_to_drop, inplace=True)
+ entity_data.drop(columns=columns_to_drop, inplace=True) # noqa: PD002
# print(entity_data)
# print("+++++++++++++++++++++++++++++++++++++++")
@@ -1329,13 +1329,13 @@ def sortDamageTable(
)
else:
name_sugest = 'Priority_' + str(target_priority_index) + '_dist'
- hyd_sig = self._rm._registry.hydraulic_significance[
+ hyd_sig = self._rm._registry.hydraulic_significance[ # noqa: SLF001
entity_data['Orginal_element']
]
entity_data.loc[:, name_sugest] = hyd_sig.to_list()
- entity_data.sort_values(name_sugest, ascending=False, inplace=True)
- entity_data.drop(columns=name_sugest, inplace=True)
+ entity_data.sort_values(name_sugest, ascending=False, inplace=True) # noqa: PD002
+ entity_data.drop(columns=name_sugest, inplace=True) # noqa: PD002
# If element type is not leakable, it does nothing. IF nodes are not
# Checked (i.e. check is not at the sequence before the current action)
@@ -1350,13 +1350,13 @@ def sortDamageTable(
# node_name_vir = get_node_name(node_name, entity_data)
# real_node_name_list.append(node_name_vir)
element_type = self._rm.entity[entity]
- leak_data = self._rm._registry.getMostLeakAtCheck(
+ leak_data = self._rm._registry.getMostLeakAtCheck( # noqa: SLF001
node_name_list, element_type
)
if leak_data is not None:
entity_data.loc[node_name_list, name_sugest] = leak_data
- entity_data.sort_values(by=name_sugest, ascending=True, inplace=True)
- entity_data.drop(columns=[name_sugest], inplace=True)
+ entity_data.sort_values(by=name_sugest, ascending=True, inplace=True) # noqa: PD002
+ entity_data.drop(columns=[name_sugest], inplace=True) # noqa: PD002
else:
entity_data = self.sortDamageTable(
entity_data,
@@ -1373,11 +1373,11 @@ def sortDamageTable(
return entity_data
- def isAgentTypeInPriorityData(self, agent_type):
+ def isAgentTypeInPriorityData(self, agent_type): # noqa: N802, D102
return agent_type in self._data
-class Jobs:
+class Jobs: # noqa: D101
def __init__(self, restoration):
self._rm = restoration
self._job_list = pd.DataFrame(
@@ -1389,11 +1389,11 @@ def __init__(self, restoration):
self._final_method = {}
self._once = {}
- def addEffect(self, effect_name, method_name, def_data):
+ def addEffect(self, effect_name, method_name, def_data): # noqa: N802, D102
if effect_name not in self._effect_data:
self._effect_data[effect_name] = None
- if self._effect_data[effect_name] != None:
+ if self._effect_data[effect_name] != None: # noqa: E711
if method_name in self._effect_data[effect_name]:
raise ValueError(
'Duplicate method_name is given. Effect name: '
@@ -1402,14 +1402,14 @@ def addEffect(self, effect_name, method_name, def_data):
+ str(method_name)
)
- if self._effect_data[effect_name] == None:
+ if self._effect_data[effect_name] == None: # noqa: E711
temp = {}
temp[method_name] = def_data
self._effect_data[effect_name] = temp
else:
self._effect_data[effect_name][method_name] = def_data
- def setJob(self, jobs_definition):
+ def setJob(self, jobs_definition): # noqa: N802, D102
self._job_list = pd.DataFrame.from_records(jobs_definition)
def _filter(self, agent_type, entity, action):
@@ -1422,8 +1422,8 @@ def _filter(self, agent_type, entity, action):
]
temp_length = len(temp)
if temp_length > 1:
- raise ValueError('We have more than one job description')
- elif temp_length == 0:
+ raise ValueError('We have more than one job description') # noqa: EM101, TRY003
+ elif temp_length == 0: # noqa: RET506
raise ValueError(
'We have zero job descriptions for agent type= '
+ repr(agent_type)
@@ -1434,7 +1434,7 @@ def _filter(self, agent_type, entity, action):
)
return temp
- def getAJobEstimate(
+ def getAJobEstimate( # noqa: N802, D102
self,
orginal_element,
agent_type,
@@ -1452,7 +1452,7 @@ def getAJobEstimate(
if 'FIXED_TIME_OVERWRITE' in overwrite_data:
time_arg = overwrite_data['FIXED_TIME_OVERWRITE']
else:
- raise ValueError('Unknown Time Data')
+ raise ValueError('Unknown Time Data') # noqa: EM101, TRY003
time = int(time_arg)
# try:
# time_arg = int(time_arg):
@@ -1465,39 +1465,39 @@ def getAJobEstimate(
if method_name in self._once[operation_name]:
once_flag = True
- if once_flag == False:
+ if once_flag == False: # noqa: E712
time = int(time * number)
# IMPORTANT/sina
- if (method_name == 2 or method_name == 1) and action == 'reroute':
+ if (method_name == 2 or method_name == 1) and action == 'reroute': # noqa: PLR1714, PLR2004
pass
return time
- def getMeanJobTime(self, agent_type, entity, action):
+ def getMeanJobTime(self, agent_type, entity, action): # noqa: N802, D102
temp = self._filter(agent_type, entity, action)
time_arg = temp['time_argument'].iloc[0]
- if type(time_arg) == int:
+ if type(time_arg) == int: # noqa: E721
time = time_arg
else:
raise ValueError('Unknown time argument: ' + str(type(time_arg)))
return time
- def getAllEffectByJobData(
+ def getAllEffectByJobData( # noqa: N802, D102
self,
agent_type,
action,
entity,
- iWithout_data=True,
- iOnlyData=False,
+ iWithout_data=True, # noqa: FBT002, ARG002, N803
+ iOnlyData=False, # noqa: FBT002, N803
):
temp = self._filter(agent_type, entity, action)
- all_effect_name = temp['effect'].iloc[0]
+ all_effect_name = temp['effect'].iloc[0] # noqa: F841
- if iOnlyData == True:
+ if iOnlyData == True: # noqa: E712
return
- def addEffectDefaultValue(self, input_dict):
+ def addEffectDefaultValue(self, input_dict): # noqa: N802, D102
_key = (
input_dict['effect_definition_name'],
input_dict['method_name'],
@@ -1506,7 +1506,7 @@ def addEffectDefaultValue(self, input_dict):
if _key in self._effect_defualts:
raise ValueError(
- 'Duplicate effects definition: {0}, {1}, {2}'.format(
+ 'Duplicate effects definition: {0}, {1}, {2}'.format( # noqa: EM103, UP030
repr(input_dict['effect_definition_name']),
repr(input_dict['method_name']),
repr(input_dict['argument']),
@@ -1517,36 +1517,36 @@ def addEffectDefaultValue(self, input_dict):
'value'
] # self._effect_defualts.append(temp_s, ignore_index=True)
- def getEffectsList(self, effect_definition_name, method_name):
- if effect_definition_name == None:
+ def getEffectsList(self, effect_definition_name, method_name): # noqa: N802, D102
+ if effect_definition_name == None: # noqa: E711
return []
if effect_definition_name == 'CHECK':
return [{'EFFECT': 'CHECK'}]
all_methods = self._effect_data[effect_definition_name]
effects_list = all_methods[method_name]
- return effects_list
+ return effects_list # noqa: RET504
- def getEffectDefinition(self, effect_definition_name, iWithout_data=True):
+ def getEffectDefinition(self, effect_definition_name, iWithout_data=True): # noqa: FBT002, N802, N803, D102
all_methods = self._effect_data[effect_definition_name]
- if iWithout_data == True and 'DATA' in all_methods:
+ if iWithout_data == True and 'DATA' in all_methods: # noqa: E712
all_methods = copy.deepcopy(all_methods)
all_methods.pop('DATA')
return all_methods
- def getEffectDefinitionName(self, agent_type, action, entity):
+ def getEffectDefinitionName(self, agent_type, action, entity): # noqa: N802, D102
temp = self._filter(agent_type, entity, action)
effects_definition_name = temp['effect'].iloc[0]
- return effects_definition_name
+ return effects_definition_name # noqa: RET504
- def chooseMethodForCurrentJob(self, node_name, effects_definition_name, entity):
+ def chooseMethodForCurrentJob(self, node_name, effects_definition_name, entity): # noqa: N802, D102
returned_method = None
- if effects_definition_name == None:
+ if effects_definition_name == None: # noqa: E711
return None
- elif (
- effects_definition_name == 'CHECK'
+ elif ( # noqa: RET505
+ effects_definition_name == 'CHECK' # noqa: PLR1714
or effects_definition_name == 'FASTCHECK'
or effects_definition_name == 'SKIP'
):
@@ -1555,7 +1555,7 @@ def chooseMethodForCurrentJob(self, node_name, effects_definition_name, entity):
effects_definition = self.getEffectDefinition(
effects_definition_name
) # self._effect_data[effects_definition_name]
- for method_name, effect_list in effects_definition.items():
+ for method_name, effect_list in effects_definition.items(): # noqa: B007, PERF102
prob_applicability = self.iEffectApplicableByProbability(
effects_definition_name, method_name, node_name, entity
)
@@ -1566,58 +1566,58 @@ def chooseMethodForCurrentJob(self, node_name, effects_definition_name, entity):
returned_method = method_name
break
- if returned_method == None:
- try:
+ if returned_method == None: # noqa: E711
+ try: # noqa: SIM105
returned_method = self._final_method[effects_definition_name]
- except:
+ except: # noqa: S110, E722
pass
return returned_method
- def _getProbability(self, method, iCondition, element_type):
- if iCondition == True:
- if 'METHOD_PROBABILITY' in method:
+ def _getProbability(self, method, iCondition, element_type): # noqa: ARG002, N802, N803
+ if iCondition == True: # noqa: E712
+ if 'METHOD_PROBABILITY' in method: # noqa: SIM401
probability = method['METHOD_PROBABILITY']
else:
- probability = 1
+ probability = 1 # noqa: F841
# else:
# if 'METHOD_PROBABILITY' in method:
- def _iConditionHolds(self, val1, con, val2):
+ def _iConditionHolds(self, val1, con, val2): # noqa: C901, N802
if con == 'BG':
- if val1 > val2:
+ if val1 > val2: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
elif con == 'BG-EQ':
- if val1 >= val2:
+ if val1 >= val2: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
elif con == 'LT':
- if val1 < val2:
+ if val1 < val2: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
elif con == 'LT-IF':
- if val1 <= val2:
+ if val1 <= val2: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
elif con == 'EQ':
- if val1 == val2:
+ if val1 == val2: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
else:
raise ValueError('Unrecognized condition: ' + repr(con))
- def getDefualtValue(self, effects_definition_name, method_name, argument):
+ def getDefualtValue(self, effects_definition_name, method_name, argument): # noqa: N802, D102
_default = self._effect_defualts
value = _default.get((effects_definition_name, method_name, argument), None)
- return value
+ return value # noqa: RET504
- def iEffectApplicableByOtherConditions(
+ def iEffectApplicableByOtherConditions( # noqa: N802, D102
self,
effects_definition_name,
method_name,
@@ -1632,28 +1632,28 @@ def iEffectApplicableByOtherConditions(
condition = single_effect['PIDR']
_con = condition[0]
_con_val = condition[1]
- _PIDR_type = self.getDefualtValue(
+ _PIDR_type = self.getDefualtValue( # noqa: N806
effects_definition_name, method_name, 'PIDR_TYPE'
)
- if _PIDR_type == None or _PIDR_type == 'ASSIGNED_DEMAND':
- old_demand = self._rm._registry._node_damage_table.loc[
+ if _PIDR_type == None or _PIDR_type == 'ASSIGNED_DEMAND': # noqa: E711, PLR1714
+ old_demand = self._rm._registry._node_damage_table.loc[ # noqa: SLF001
damaged_node_name, 'Demand1'
]
- new_demand = self._rm._registry._node_damage_table.loc[
+ new_demand = self._rm._registry._node_damage_table.loc[ # noqa: SLF001
damaged_node_name, 'Demand2'
]
else:
raise ValueError('unrecognized Setting: ' + _PIDR_type)
- _PIDR = new_demand / old_demand
+ _PIDR = new_demand / old_demand # noqa: N806
- iHold = self._iConditionHolds(_PIDR, _con, _con_val)
+ iHold = self._iConditionHolds(_PIDR, _con, _con_val) # noqa: N806
- return iHold
+ return iHold # noqa: RET504
return True
- def iEffectApplicableByProbability(
+ def iEffectApplicableByProbability( # noqa: N802, D102
self,
effects_definition_name,
method_name,
@@ -1664,12 +1664,12 @@ def iEffectApplicableByProbability(
temp = self.getDefualtValue(
effects_definition_name, method_name, 'METHOD_PROBABILITY'
)
- if temp != None:
+ if temp != None: # noqa: E711
_prob = temp
try:
self._check_probability(_prob)
- except Exception as e:
- print(
+ except Exception as e: # noqa: BLE001
+ print( # noqa: T201
'in Method based Probability of method '
+ str(method_name)
+ ', and definition_name '
@@ -1677,7 +1677,7 @@ def iEffectApplicableByProbability(
+ ', :'
+ str(_prob)
)
- raise ValueError(e)
+ raise ValueError(e) # noqa: B904
# =============================================================================
# if 'DEFAULT' in self._effect_data[effects_definition_name]:
@@ -1695,7 +1695,7 @@ def iEffectApplicableByProbability(
if 'DATA' in self._effect_data[effects_definition_name]:
data = self._effect_data[effects_definition_name]['DATA']
if 'METHOD_PROBABILITY' in data.columns:
- element_name = self._rm._registry.getOrginalElement(
+ element_name = self._rm._registry.getOrginalElement( # noqa: SLF001
damaged_node_name, self._rm.entity[entity]
)
@@ -1720,31 +1720,31 @@ def iEffectApplicableByProbability(
)
try:
self._check_probability(_prob)
- except Exception as e:
- print(
+ except Exception as e: # noqa: BLE001
+ print( # noqa: T201
'in LIST of method '
+ method_name
+ ', and definition_name '
+ effects_definition_name
)
- raise ValueError(e)
+ raise ValueError(e) # noqa: B904
_rand = random.random()
# if effects_definition_name == 'MJTRreroute':
# print(str(method_name) + ' - ' + repr(_prob))
logger.debug(_prob)
- if _rand < _prob:
+ if _rand < _prob: # noqa: SIM103
return True
return False
def _check_probability(self, _prob):
- mes = None
+ mes = None # noqa: F841
_prob = float(_prob)
if _prob < 0:
- raise ValueError('probability cannot be less than 0.')
- elif _prob > 1:
- res = False
- raise ValueError('probability cannot be more than 1.')
+ raise ValueError('probability cannot be less than 0.') # noqa: EM101, TRY003
+ elif _prob > 1: # noqa: RET506
+ res = False # noqa: F841
+ raise ValueError('probability cannot be more than 1.') # noqa: EM101, TRY003
# =============================================================================
diff --git a/modules/systemPerformance/REWET/REWET/restoration/io.py b/modules/systemPerformance/REWET/REWET/restoration/io.py
index 6fb249ab4..93ea12387 100644
--- a/modules/systemPerformance/REWET/REWET/restoration/io.py
+++ b/modules/systemPerformance/REWET/REWET/restoration/io.py
@@ -1,7 +1,7 @@
"""Created on Wed Dec 19 19:10:35 2020
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import logging
from collections import OrderedDict
@@ -29,7 +29,7 @@ def _split_line(line):
return _vals, _cmnt
-class RestorationIO:
+class RestorationIO: # noqa: D101
def __init__(self, restoration_model, definition_file_name):
"""Needs a file that contains:
@@ -44,7 +44,7 @@ def __init__(self, restoration_model, definition_file_name):
-------
None.
- """
+ """ # noqa: D400
# some of the following lines have been adopted from WNTR
self.rm = restoration_model
self.crew_data = {}
@@ -82,23 +82,23 @@ def __init__(self, restoration_model, definition_file_name):
self.config_file_dir = config_file_path.parent
- with open(definition_file_name, encoding='utf-8') as f:
+ with open(definition_file_name, encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
edata['lnum'] = lnum
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.startswith('['):
+ elif line.startswith('['): # noqa: RET507
vals = line.split()
sec = vals[0].upper()
edata['sec'] = sec
if sec in expected_sections:
section = sec
continue
- else:
+ else: # noqa: RET507
raise RuntimeError(
'%(fname)s:%(lnum)d: Invalid section "%(sec)s"' % edata
)
@@ -135,7 +135,7 @@ def _read_files(self):
edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 2:
+ if len(words) != 2: # noqa: PLR2004
edata['key'] = words[0]
raise RuntimeError(
'%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
@@ -148,34 +148,34 @@ def _read_files(self):
for file_handle, file_address in self._file_handle_address.items():
self._file_data[file_handle] = self._read_each_file(file_address)
- self.rm._files = self._file_data
+ self.rm._files = self._file_data # noqa: SLF001
def _read_each_file(self, file_address, method=0):
lnum = 0
- iTitle = True
+ iTitle = True # noqa: N806
data_temp = None
if method == 0:
try:
- raise
- with open(file_address, encoding='utf-8') as f:
+ raise # noqa: PLE0704
+ with open(file_address, encoding='utf-8') as f: # noqa: PTH123
for line in f:
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.startswith(';'):
+ elif line.startswith(';'): # noqa: RET507
# comment
continue
else:
lnum += 1
vals = line.split()
- if iTitle == True:
- iTitle = False
+ if iTitle == True: # noqa: E712
+ iTitle = False # noqa: N806
data_temp = pd.DataFrame(columns=vals)
else:
data_temp.loc[lnum - 2] = vals
- except:
+ except: # noqa: E722
data_temp = self._read_each_file(file_address, method=1)
elif method == 1:
file_address = self.config_file_dir / file_address
@@ -187,13 +187,13 @@ def _read_each_file(self, file_address, method=0):
def _read_shifts(self):
# self._shift_data=pd.DataFrame()
# self._file_handle_address = {}
- for lnum, line in self.sections['[SHIFTS]']:
+ for lnum, line in self.sections['[SHIFTS]']: # noqa: B007
# edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 3:
- raise RuntimeError(
- '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
+ if len(words) != 3: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s' # noqa: EM101
)
shift_name = words[0]
shift_begining = int(words[1]) * 3600
@@ -201,15 +201,15 @@ def _read_shifts(self):
self.rm.shifting.addShift(shift_name, shift_begining, shift_ending)
- def _read_entities(self):
+ def _read_entities(self): # noqa: C901
for lnum, line in self.sections['[ENTITIES]']:
arg1 = None
arg2 = None
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 2 and len(words) != 4:
- raise RuntimeError(
- '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
+ if len(words) != 2 and len(words) != 4: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s' # noqa: EM101
)
entity_name = words[0]
element = words[1].upper()
@@ -220,18 +220,18 @@ def _read_entities(self):
# if entity_name in self.rm.entity:
# raise ValueError('Entity already defined')
- if len(words) == 4:
+ if len(words) == 4: # noqa: PLR2004
arg1 = words[2]
arg2 = words[3]
if (
- element == 'PIPE'
- and arg1 not in self.rm._registry._pipe_damage_table.columns
+ element == 'PIPE' # noqa: PLR1714
+ and arg1 not in self.rm._registry._pipe_damage_table.columns # noqa: SLF001
and arg1 != 'FILE'
and arg1 != 'NOT_IN_FILE'
) and (
element == 'DISTNODE'
- and arg1 not in self.rm._registry._node_damage_table.columns
+ and arg1 not in self.rm._registry._node_damage_table.columns # noqa: SLF001
):
raise ValueError(
'Argument 1('
@@ -240,7 +240,7 @@ def _read_entities(self):
+ str(lnum)
)
- if arg1 == None:
+ if arg1 == None: # noqa: E711
self.rm.entity[entity_name] = element
ent_rule = [('ALL', None, None)]
@@ -249,23 +249,23 @@ def _read_entities(self):
else:
self.rm.entity_rule[entity_name].append(ent_rule[0])
- self.rm._registry.addAttrToElementDamageTable(
+ self.rm._registry.addAttrToElementDamageTable( # noqa: SLF001
element,
entity_name,
- True,
+ True, # noqa: FBT003
)
- elif arg1 == 'FILE' or arg1 == 'NOT_IN_FILE':
- name_list = self.rm._files[arg2]['ElementID'].unique().tolist()
+ elif arg1 == 'FILE' or arg1 == 'NOT_IN_FILE': # noqa: PLR1714
+ name_list = self.rm._files[arg2]['ElementID'].unique().tolist() # noqa: SLF001
ent_rule = [(arg1, None, name_list)]
self.rm.entity[entity_name] = element
if entity_name not in self.rm.entity_rule:
self.rm.entity_rule[entity_name] = ent_rule
- self.rm._registry.addAttrToElementDamageTable(
+ self.rm._registry.addAttrToElementDamageTable( # noqa: SLF001
element,
entity_name,
- True,
+ True, # noqa: FBT003
)
else:
self.rm.entity_rule[entity_name].append(ent_rule[0])
@@ -274,7 +274,7 @@ def _read_entities(self):
if ':' in arg2:
split_arg = arg2.split(':')
- if len(split_arg) != 2:
+ if len(split_arg) != 2: # noqa: PLR2004
raise ValueError(
'There must be two parts: PART1:PART2. Now there are '
+ repr(
@@ -303,7 +303,7 @@ def _read_entities(self):
try:
temp_arg3 = float(arg3)
- except:
+ except: # noqa: E722
temp_arg3 = str(arg3)
arg3 = temp_arg3
@@ -311,10 +311,10 @@ def _read_entities(self):
if entity_name not in self.rm.entity:
self.rm.entity[entity_name] = element
self.rm.entity_rule[entity_name] = ent_rule
- self.rm._registry.addAttrToElementDamageTable(
+ self.rm._registry.addAttrToElementDamageTable( # noqa: SLF001
element,
entity_name,
- True,
+ True, # noqa: FBT003
)
else:
if self.rm.entity[entity_name] != element:
@@ -342,7 +342,7 @@ def _read_entities(self):
# =============================================================================
def _read_sequences(self):
- for lnum, line in self.sections['[SEQUENCES]']:
+ for lnum, line in self.sections['[SEQUENCES]']: # noqa: B007
words, comments = _split_line(line)
if words is not None and len(words) > 0:
# if len(words) != 2 or len(words)!=4:
@@ -350,37 +350,37 @@ def _read_sequences(self):
element = words[0].upper()
seq = []
for arg in words[1:]:
- seq.append(arg)
+ seq.append(arg) # noqa: PERF402
if element in self.rm.sequence:
- raise ValueError('Element already in sequences')
+ raise ValueError('Element already in sequences') # noqa: EM101, TRY003
self.rm.sequence[element] = seq
for el in self.rm.sequence:
if el in self.rm.ELEMENTS:
for action in self.rm.sequence[el]:
- self.rm._registry.addAttrToElementDamageTable(el, action, None)
+ self.rm._registry.addAttrToElementDamageTable(el, action, None) # noqa: SLF001
def _read_agents(self):
agent_file_handle = {}
group_names = {}
group_column = {}
- for lnum, line in self.sections['[AGENTS]']:
+ for lnum, line in self.sections['[AGENTS]']: # noqa: B007
# edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
_group_name = None
_group_column = None
- if len(words) < 3:
- raise RuntimeError(
- '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
+ if len(words) < 3: # noqa: PLR2004
+ raise RuntimeError( # noqa: TRY003
+ '%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s' # noqa: EM101
)
agent_type = words[0]
if words[1].upper() == 'FILE':
agent_file_handle[words[0]] = words[2]
else:
- raise ValueError('Unknown key')
- if len(words) >= 4:
+ raise ValueError('Unknown key') # noqa: EM101, TRY003
+ if len(words) >= 4: # noqa: PLR2004
group_data = words[3]
_group_name = group_data.split(':')[0]
_group_column = group_data.split(':')[1]
@@ -396,7 +396,7 @@ def _read_agents(self):
agent_number = data['Number']
j = 0
- for lnum, line in data.iterrows():
+ for lnum, line in data.iterrows(): # noqa: B007
# try:
num = int(agent_number[j])
# except :
@@ -415,7 +415,7 @@ def _read_agents(self):
definitions['shift_name'] = predefinitions['Shift']
group_name_temp = None
- if group_names[agent_type] != None:
+ if group_names[agent_type] != None: # noqa: E711
definitions['group'] = predefinitions[
group_column[agent_type]
]
@@ -426,14 +426,14 @@ def _read_agents(self):
definitions['group_name'] = group_name_temp
self.rm.agents.addAgent(agent_name, agent_type, definitions)
- j += 1
+ j += 1 # noqa: SIM113
def _read_groups(self):
for lnum, line in self.sections['[GROUPS]']:
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) != 6:
+ if len(words) != 6: # noqa: PLR2004
raise ValueError(
'error in line: ' + str(lnum) + ': ' + repr(len(words))
)
@@ -441,8 +441,8 @@ def _read_groups(self):
element_type = words[1]
argument = words[2]
file_handler = words[3]
- element_col_ID = words[4]
- pipe_col_ID = words[5]
+ element_col_ID = words[4] # noqa: N806
+ pipe_col_ID = words[5] # noqa: N806
if element_type not in self.rm.ELEMENTS:
raise ValueError(
@@ -456,7 +456,7 @@ def _read_groups(self):
'The only acceptable argument is FILE. Line: ' + repr(lnum)
)
- data = self.rm._files[file_handler]
+ data = self.rm._files[file_handler] # noqa: SLF001
if pipe_col_ID not in data:
raise ValueError(
@@ -490,14 +490,14 @@ def _read_groups(self):
self.rm.group[element_type][group_name] = group_list
- def _read_points(self):
+ def _read_points(self): # noqa: C901
for lnum, line in self.sections['[POINTS]']:
words, comments = _split_line(line)
if words is None or len(words) < 1: # Empty Line
continue
- if not len(words) >= 2: # Syntax Error
+ if not len(words) >= 2: # Syntax Error # noqa: PLR2004
raise ValueError(
'Syntax error in line: '
+ str(lnum)
@@ -540,7 +540,7 @@ def _read_points(self):
)
x_y_coord = word.split(':')
- if len(x_y_coord) > 2:
+ if len(x_y_coord) > 2: # noqa: PLR2004
raise ValueError(
'Syntax error in line: '
+ str(lnum)
@@ -559,8 +559,8 @@ def _read_points(self):
try:
x_coord = float(x_coord)
- except:
- raise ValueError(
+ except: # noqa: E722
+ raise ValueError( # noqa: B904
'Syntax error in line: '
+ str(lnum)
+ '\n'
@@ -573,8 +573,8 @@ def _read_points(self):
try:
y_coord = float(y_coord)
- except:
- raise ValueError(
+ except: # noqa: E722
+ raise ValueError( # noqa: B904
'Syntax error in line: '
+ str(lnum)
+ '\n'
@@ -595,7 +595,7 @@ def _read_points(self):
else:
self.rm.proximity_points[group_name] = current_group_point_list
- def _read_priorities(self):
+ def _read_priorities(self): # noqa: C901
agent_type_list = self.rm.agents.getAllAgentTypes()
for lnum, line in self.sections['[PRIORITIES]']:
words, comments = _split_line(line)
@@ -603,7 +603,7 @@ def _read_priorities(self):
if words is None or len(words) < 1:
continue
- if not len(words) >= 3:
+ if not len(words) >= 3: # noqa: PLR2004
raise ValueError(
'Syntax error in line: '
+ str(lnum)
@@ -631,11 +631,11 @@ def _read_priorities(self):
try:
priority_type = int(words[1])
- except:
+ except: # noqa: E722
try:
priority_type = int(float(words[1]))
- except:
- raise ValueError(
+ except: # noqa: E722
+ raise ValueError( # noqa: B904
'Syntax error in line: '
+ str(lnum)
+ '\n'
@@ -678,7 +678,7 @@ def _read_priorities(self):
)
split_temp = word.split(':')
- if len(split_temp) > 2:
+ if len(split_temp) > 2: # noqa: PLR2004
raise ValueError(
'Syntax error in line: '
+ str(lnum)
@@ -725,7 +725,7 @@ def _read_priorities(self):
arg.append((action, damage_group))
- elif priority_type == 2:
+ elif priority_type == 2: # noqa: PLR2004
if (
word not in self.rm.proximity_points
and word not in self.rm.reserved_priority_names
@@ -748,8 +748,8 @@ def _read_priorities(self):
self.rm.priority.addData(agent_type, priority_type, arg)
- for crew_type in self.rm.priority._data:
- priority_list = self.rm.priority._data[crew_type]
+ for crew_type in self.rm.priority._data: # noqa: SLF001
+ priority_list = self.rm.priority._data[crew_type] # noqa: SLF001
primary_priority_order_list = priority_list[1]
secondary_priority_order_list = priority_list[2]
if len(primary_priority_order_list) != len(
@@ -763,7 +763,7 @@ def _read_priorities(self):
not_defined = []
for agent_type in agent_type_list:
if not self.rm.priority.isAgentTypeInPriorityData(agent_type):
- not_defined.append(agent_type)
+ not_defined.append(agent_type) # noqa: PERF401
if len(not_defined) > 0:
raise ValueError(
@@ -778,7 +778,7 @@ def _read_jobs(self):
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if not len(words) >= 3:
+ if not len(words) >= 3: # noqa: PLR2004
raise ValueError(
'Not enough arguments. Error in line: ' + str(lnum)
)
@@ -807,13 +807,13 @@ def _read_jobs(self):
if definer.upper() == 'FIXED':
try:
argument = int(argument)
- except:
- print('exeption handled in _read_jobs')
+ except: # noqa: E722
+ print('exception handled in _read_jobs') # noqa: T201
else:
raise ValueError('Definer is not recognized: ' + definer)
effect = None
- if len(words) >= 4:
+ if len(words) >= 4: # noqa: PLR2004
effect = words[3]
cur_job_definition = {
@@ -826,9 +826,9 @@ def _read_jobs(self):
jobs_definition.append(cur_job_definition)
self.rm.jobs.setJob(jobs_definition)
- def _read_define(self):
- job = {}
- used_jobs = self.rm.jobs._job_list.effect.unique().tolist()
+ def _read_define(self): # noqa: C901, PLR0912
+ job = {} # noqa: F841
+ used_jobs = self.rm.jobs._job_list.effect.unique().tolist() # noqa: SLF001
if None in used_jobs:
used_jobs.remove(None)
@@ -849,7 +849,7 @@ def _read_define(self):
)
try:
method_name = float(words[1])
- except:
+ except: # noqa: E722
method_name = words[1]
res_list = []
@@ -883,15 +883,15 @@ def _read_define(self):
if main_arg == 'RECONNECT':
if arg == 'PIPESIZE':
if 'PIPESIZEFACTOR' in res:
- raise ValueError(
- 'Either pipe size or pipe size factor can be defined'
+ raise ValueError( # noqa: TRY003
+ 'Either pipe size or pipe size factor can be defined' # noqa: EM101
)
res['PIPESIZE'] = float(val)
elif arg == 'PIPESIZEFACTOR':
if 'PIPESIZE' in res:
- raise ValueError(
- 'Either pipe size or pipe size factor can be defined'
+ raise ValueError( # noqa: TRY003
+ 'Either pipe size or pipe size factor can be defined' # noqa: EM101
)
val = float(val)
if val > 1 or val < 0:
@@ -901,9 +901,9 @@ def _read_define(self):
)
res['PIPESIZEFACTOR'] = float(val)
elif arg == 'CV':
- if val == 'TRUE' or val == '1':
+ if val == 'TRUE' or val == '1': # noqa: PLR1714
val = True
- elif val == 'FALSE' or val == '0':
+ elif val == 'FALSE' or val == '0': # noqa: PLR1714
val = False
else:
raise ValueError(
@@ -918,21 +918,21 @@ def _read_define(self):
res['CV'] = val
elif arg == 'PIPELENGTH':
try:
- val == float(val)
+ val == float(val) # noqa: B015
except Exception as e:
- print(
+ print( # noqa: T201
'The value for PIPELENGTH must be a number'
)
- raise e
+ raise e # noqa: TRY201
res['PIPELENGTH'] = val
elif arg == 'PIPEFRICTION':
try:
- val == float(val)
+ val == float(val) # noqa: B015
except Exception as e:
- print(
+ print( # noqa: T201
'The value for PIPEFRICTION must be a number'
)
- raise e
+ raise e # noqa: TRY201
res['PIPEFRICTION'] = val
else:
raise ValueError(
@@ -946,9 +946,9 @@ def _read_define(self):
res['PUMP'] = float(val)
elif arg == 'CV':
- if val == 'TRUE' or val == '1':
+ if val == 'TRUE' or val == '1': # noqa: PLR1714
val = True
- elif val == 'FALSE' or val == '0':
+ elif val == 'FALSE' or val == '0': # noqa: PLR1714
val = False
else:
raise ValueError(
@@ -989,8 +989,8 @@ def _read_define(self):
)
elif main_arg == 'COL_CLOSE_PIPE':
- raise ValueError(
- 'REPAIR at this stage does not accept any argument'
+ raise ValueError( # noqa: TRY003
+ 'REPAIR at this stage does not accept any argument' # noqa: EM101
)
elif main_arg == 'ISOLATE_DN':
@@ -1000,7 +1000,7 @@ def _read_define(self):
or val[-1] != ')'
or val.find(',') == -1
):
- ValueError(
+ ValueError( # noqa: PLW0133
'After PIDR the format must be like (CONDITION,VALUE)'
)
@@ -1010,7 +1010,7 @@ def _read_define(self):
_con_val = float(val_split[1])
if not (
- _con == 'BG'
+ _con == 'BG' # noqa: PLR1714
or _con == 'EQ'
or _con == 'LT'
or _con == 'BG-EQ'
@@ -1029,14 +1029,14 @@ def _read_define(self):
res['PIDR'] = (_con, _con_val)
elif main_arg == 'REPAIR':
- raise ValueError(
- 'REPAIR at this stage does not accept any argument'
+ raise ValueError( # noqa: TRY003
+ 'REPAIR at this stage does not accept any argument' # noqa: EM101
)
elif method_name.upper() == 'DEFAULT':
- try:
+ try: # noqa: SIM105
arg = int(arg)
- except:
+ except: # noqa: S110, E722
pass
if main_arg == 'METHOD_PROBABILITY':
@@ -1044,15 +1044,15 @@ def _read_define(self):
if val < 0:
raise ValueError(
- 'Probability cannot be less than zero. '
+ 'Probability cannot be less than zero. ' # noqa: ISC003
+ ' In line '
+ lnum
+ ' probability: '
+ val
)
- elif val > 1:
+ elif val > 1: # noqa: RET506
raise ValueError(
- 'Probability cannot be bigger than 1. '
+ 'Probability cannot be bigger than 1. ' # noqa: ISC003
+ ' In line '
+ lnum
+ ' probability: '
@@ -1071,22 +1071,22 @@ def _read_define(self):
val = None
else:
val = None
- print(
+ print( # noqa: T201
'WARNING: At default line in FINAL section, the third argument is not NULL: '
+ str(val)
+ 'The value is ignored anyway'
)
- self.rm.jobs._final_method[job_name] = arg
+ self.rm.jobs._final_method[job_name] = arg # noqa: SLF001
elif main_arg == 'ONLYONCE':
- try:
+ try: # noqa: SIM105
val = float(val)
- except:
+ except: # noqa: S110, E722
pass
- if job_name in self.rm.jobs._once:
- self.rm.jobs._once[job_name].append(val)
+ if job_name in self.rm.jobs._once: # noqa: SLF001
+ self.rm.jobs._once[job_name].append(val) # noqa: SLF001
else:
- self.rm.jobs._once[job_name] = [val]
+ self.rm.jobs._once[job_name] = [val] # noqa: SLF001
else:
raise ValueError(
'Unrecognized argument in line '
@@ -1106,7 +1106,7 @@ def _read_define(self):
i += 2
res_list.append(res)
- if flag == False:
+ if flag == False: # noqa: E712
self.rm.jobs.addEffect(job_name, method_name, res_list)
# for self.rm.effects.pruneData()
@@ -1117,7 +1117,7 @@ def _read_file_effect(self, file_info, effect_name):
file_handle = file_info[0]
file_data = file_info[1:]
- data = self.rm._files[file_handle]
+ data = self.rm._files[file_handle] # noqa: SLF001
# columns_to_remove = data.columns.tolist()
aliases = {}
@@ -1134,7 +1134,7 @@ def _read_file_effect(self, file_info, effect_name):
if val not in data.columns:
raise ValueError('Value not in file: ' + val)
if (
- arg == 'ELEMENT_NAME'
+ arg == 'ELEMENT_NAME' # noqa: PLR1714
or arg == 'METHOD_NAME'
or arg == 'METHOD_PROBABILITY'
):
@@ -1157,7 +1157,7 @@ def _read_file_effect(self, file_info, effect_name):
{'FIXED_TIME_OVERWRITE': int(time_overwrite_data[i] * 3600)}
for i in range(len(time_overwrite_data))
]
- self.rm.jobs._time_overwrite.update(
+ self.rm.jobs._time_overwrite.update( # noqa: SLF001
pd.Series(index=_key, data=time_overwrite_data).to_dict()
)
@@ -1165,7 +1165,7 @@ def _read_file_effect(self, file_info, effect_name):
raise ValueError('Unrecognized argument in pair: ' + _arg)
res = pd.DataFrame(res)
# print(res)
- return res
+ return res # noqa: RET504
def _read_config(self):
"""Reads config files which contains general specification of
@@ -1180,7 +1180,7 @@ def _read_config(self):
-------
None.
- """
+ """ # noqa: D205, D400, D401
edata = OrderedDict()
self._crew_file_name = []
self._crew_file_type = []
@@ -1188,7 +1188,7 @@ def _read_config(self):
edata['lnum'] = lnum
words, comments = _split_line(line)
if words is not None and len(words) > 0:
- if len(words) < 2:
+ if len(words) < 2: # noqa: PLR2004
edata['key'] = words[0]
raise RuntimeError(
'%(fname)s:%(lnum)-6d %(sec)13s no value provided for %(key)s'
@@ -1205,66 +1205,66 @@ def _read_config(self):
self._read_crew()
def _read_demand_nodes(self):
- titles = []
+ titles = [] # noqa: F841
ntitle = 0
lnum = 0
dtemp = []
- with open(self._demand_Node_file_name, encoding='utf-8') as f:
+ with open(self._demand_Node_file_name, encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
words = line.split()
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.upper().startswith('NODEID'):
+ elif line.upper().startswith('NODEID'): # noqa: RET507
title = words.copy()
ntitle = len(
words
) # we need this to confirm that every line has data for every title(column)
continue
elif nwords != ntitle:
- raise ValueError(
- '%{fname}s:%(lnum)d: Number of data does not match number of titles'
+ raise ValueError( # noqa: TRY003
+ '%(fname)s:%(lnum)d: Number of data does not match number of titles' # noqa: EM101
)
elif nwords == ntitle:
dtemp.append(words)
else:
- raise ValueError(
- '%{fname}s:%(lnum)d:This error must nnever happen'
+ raise ValueError( # noqa: TRY003
+ '%(fname)s:%(lnum)d: This error must never happen' # noqa: EM101
)
self.demand_node = pd.DataFrame(dtemp, columns=title)
def _read_crew(self):
- titles = []
+ titles = [] # noqa: F841
ntitle = 0
lnum = 0
dtemp = []
- with open(self._crew_file_name[-1], encoding='utf-8') as f:
+ with open(self._crew_file_name[-1], encoding='utf-8') as f: # noqa: PTH123
for line in f:
lnum += 1
- line = line.strip()
+ line = line.strip() # noqa: PLW2901
nwords = len(line.split())
words = line.split()
if len(line) == 0 or nwords == 0:
# Blank line
continue
- elif line.upper().startswith('DISTYARDID'):
+ elif line.upper().startswith('DISTYARDID'): # noqa: RET507
title = words.copy()
ntitle = len(
words
) # we need this to confirm that every line has data for every title(column)
continue
elif nwords != ntitle:
- raise ValueError(
- '%{fname}s:%(lnum)d: Number of data does not match number of titles'
+ raise ValueError( # noqa: TRY003
+ '%(fname)s:%(lnum)d: Number of data does not match number of titles' # noqa: EM101
)
elif nwords == ntitle:
dtemp.append(words)
else:
- raise ValueError(
- '%{fname}s:%(lnum)d:This error must nnever happen'
+ raise ValueError( # noqa: TRY003
+ '%(fname)s:%(lnum)d: This error must never happen' # noqa: EM101
)
self.crew_data[self._crew_file_type[-1]] = pd.DataFrame(
dtemp, columns=title
diff --git a/modules/systemPerformance/REWET/REWET/restoration/model.py b/modules/systemPerformance/REWET/REWET/restoration/model.py
index ee819c460..b5d631b19 100644
--- a/modules/systemPerformance/REWET/REWET/restoration/model.py
+++ b/modules/systemPerformance/REWET/REWET/restoration/model.py
@@ -1,7 +1,7 @@
"""Created on Fri Dec 25 05:09:25 2020
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import logging
import random
@@ -19,7 +19,7 @@
logger = logging.getLogger(__name__)
-class Restoration:
+class Restoration: # noqa: D101
def __init__(self, conifg_file_name, registry, damage):
self.ELEMENTS = ['PIPE', 'DISTNODE', 'GNODE', 'TANK', 'PUMP', 'RESERVOIR']
self._CONDITIONS = ['EQ', 'BG', 'LT', 'BG-EQ', 'LT-EQ', 'NOTEQ']
@@ -66,7 +66,7 @@ def __init__(self, conifg_file_name, registry, damage):
self.ApplyOverrides()
- def ApplyOverrides(self):
+ def ApplyOverrides(self): # noqa: N802, D102
overrides = self._registry.settings.overrides
if 'POINTS' in overrides:
@@ -74,7 +74,7 @@ def ApplyOverrides(self):
for point_group_name in points_overrides:
if point_group_name not in self.proximity_points:
logger.warning(
- 'CAUTION!'
+ 'CAUTION!' # noqa: ISC003, G003
+ '\n'
+ 'Override Point Group '
+ repr(point_group_name)
@@ -84,16 +84,16 @@ def ApplyOverrides(self):
point_group_name
]
- def perform_action(self, wn, stop_time):
+ def perform_action(self, wn, stop_time): # noqa: C901, D102
logger.debug(stop_time)
# checks if the restoration is started
- if self.eq_time == None or self.restoration_start_time == None:
- raise ValueError('restoration is not initiated')
+ if self.eq_time == None or self.restoration_start_time == None: # noqa: E711
+ raise ValueError('restoration is not initiated') # noqa: EM101, TRY003
# checks if the stop time is a hard event
if not self._isHardEvent(stop_time):
- raise RuntimeError('stop time is not a hard event')
+ raise RuntimeError('stop time is not a hard event') # noqa: EM101, TRY003
# gets the latest damage revealed and reported to the damage board registry
self.dispatch.updateDiscovery(stop_time)
@@ -125,26 +125,26 @@ def perform_action(self, wn, stop_time):
logger.warning('-----------------')
for r_agent in released_agents:
- agent_type = self.agents._agents.loc[r_agent, 'type']
- action = self.agents._agents.loc[r_agent, 'data'].cur_job_action
- entity = self.agents._agents.loc[r_agent, 'data'].cur_job_entity
- effect_definition_name = self.agents._agents.loc[
+ agent_type = self.agents._agents.loc[r_agent, 'type'] # noqa: SLF001
+ action = self.agents._agents.loc[r_agent, 'data'].cur_job_action # noqa: SLF001
+ entity = self.agents._agents.loc[r_agent, 'data'].cur_job_entity # noqa: SLF001
+ effect_definition_name = self.agents._agents.loc[ # noqa: SLF001
r_agent, 'data'
].cur_job_effect_definition_name
- method_name = self.agents._agents.loc[
+ method_name = self.agents._agents.loc[ # noqa: SLF001
r_agent, 'data'
].cur_job_method_name
- damaged_node_name = self.agents._agents.loc[
+ damaged_node_name = self.agents._agents.loc[ # noqa: SLF001
r_agent, 'data'
].cur_job_location
- iOngoing = self.agents._agents.loc[r_agent, 'data'].cur_job_ongoing
+ iOngoing = self.agents._agents.loc[r_agent, 'data'].cur_job_ongoing # noqa: SLF001, N806
element_type = self.entity[entity]
effects_list = self.jobs.getEffectsList(
effect_definition_name, method_name
)
- if iOngoing == False:
+ if iOngoing == False: # noqa: E712
# This must be before apply effect because if not, bypass pipe will not be removed/Sina
_damage_data = self._registry.getDamageData(
element_type, iCopy=False
@@ -168,25 +168,25 @@ def perform_action(self, wn, stop_time):
)
if len(collective_damage_data_name_list) > 0:
next_action = self.getNextSequence(element_type, action)
- if next_action != None:
+ if next_action != None: # noqa: E711
self._registry.setDamageDataByRowAndColumn(
element_type,
collective_damage_data_name_list,
next_action,
- False,
+ False, # noqa: FBT003
)
self._registry.setDamageDataByRowAndColumn(
element_type,
collective_damage_data_name_list,
'discovered',
- True,
+ True, # noqa: FBT003
)
self._registry.updateElementDamageTable(
element_type,
action,
damaged_node_name,
- True,
+ True, # noqa: FBT003
)
for single_effect in effects_list:
self.applyEffect(
@@ -200,9 +200,9 @@ def perform_action(self, wn, stop_time):
next_action = self.getNextSequence(element_type, action)
- if next_action != None:
+ if next_action != None: # noqa: E711
if (
- type(_damage_data.loc[damaged_node_name, next_action])
+ type(_damage_data.loc[damaged_node_name, next_action]) # noqa: E721
== str
):
pass
@@ -213,7 +213,7 @@ def perform_action(self, wn, stop_time):
element_type,
next_action,
damaged_node_name,
- False,
+ False, # noqa: FBT003
icheck=True,
)
else:
@@ -235,7 +235,7 @@ def perform_action(self, wn, stop_time):
# for each agent type, we get the priority data (entity and action), refine damage data from entity that are waiting for action (action = False)
for agent_type in ready_agent_types:
typed_ready_agent = ready_agent[ready_agent['type'] == agent_type]
- typed_ready_agent._is_copy = None
+ typed_ready_agent._is_copy = None # noqa: SLF001
if not len(typed_ready_agent) > 0:
continue
@@ -246,16 +246,16 @@ def perform_action(self, wn, stop_time):
)
non_tagged_typed_ready_agent = typed_ready_agent.copy()
- non_tagged_typed_ready_agent._is_copy = None
+ non_tagged_typed_ready_agent._is_copy = None # noqa: SLF001
for agent_group_tag in agent_group_tag_list:
typed_ready_agent = non_tagged_typed_ready_agent[
non_tagged_typed_ready_agent['group'] == agent_group_tag
]
- typed_ready_agent._is_copy = None
+ typed_ready_agent._is_copy = None # noqa: SLF001
order_counter = -1
for prime_priority in agent_prime_priority_list:
order_counter += 1
- action = list(prime_priority)[0]
+ action = list(prime_priority)[0] # noqa: RUF015
entity = list(prime_priority)[1]
damage_data = self._registry.getDamageData(self.entity[entity])
entity_data = self.refineEntityDamageTable(
@@ -266,12 +266,12 @@ def perform_action(self, wn, stop_time):
)
if len(entity_data) == 0:
continue
- entity_data = entity_data[(entity_data['discovered'] == True)]
- entity_data = entity_data[(entity_data[entity] == True)]
- entity_data = entity_data[(entity_data[action] == False)]
+ entity_data = entity_data[(entity_data['discovered'] == True)] # noqa: E712
+ entity_data = entity_data[(entity_data[entity] == True)] # noqa: E712
+ entity_data = entity_data[(entity_data[action] == False)] # noqa: E712
logger.warning(
- 'action='
+ 'action=' # noqa: G003
+ action
+ ', entity='
+ entity
@@ -284,7 +284,7 @@ def perform_action(self, wn, stop_time):
if previous_action == action:
break
entity_data = entity_data[
- (entity_data[previous_action] != False)
+ (entity_data[previous_action] != False) # noqa: E712
]
vacant_job_list = self._registry.getVacantOnGoingJobs(
@@ -317,22 +317,22 @@ def perform_action(self, wn, stop_time):
if res == 'break':
break
- elif res == 'continue':
+ elif res == 'continue': # noqa: RET508
continue
new_events = self.getNewEventsTime(reset=True)
self._registry.restoration_log_book.updateAgentLogBook(
- self.agents._agents,
+ self.agents._agents, # noqa: SLF001
stop_time,
)
self._registry.restoration_log_book.updateAgentHistory(
- self.agents._agents,
+ self.agents._agents, # noqa: SLF001
stop_time,
)
return new_events
- def perform_action_helper(
+ def perform_action_helper( # noqa: C901, D102
self,
typed_ready_agent,
entity_data,
@@ -342,20 +342,20 @@ def perform_action_helper(
stop_time,
order_counter,
wn,
- flag=False,
+ flag=False, # noqa: FBT002
):
ignore_list = []
if len(entity_data) == 0:
- if flag == True:
- raise RuntimeError(
- 'Ongoing and zero-length emtity data does must never appended together.'
+ if flag == True: # noqa: E712
+ raise RuntimeError( # noqa: TRY003
+ 'Ongoing and zero-length entity data must never be appended together.' # noqa: EM101
)
return 'continue'
entity_data = self.priority.sortDamageTable(
wn, entity_data, entity, agent_type, 2, order_counter
) # sort according to the possible secondary priority
- for node_name, damage_data in entity_data.iterrows():
+ for node_name, damage_data in entity_data.iterrows(): # noqa: RET503
if not len(typed_ready_agent) > 0:
break
@@ -381,7 +381,7 @@ def perform_action_helper(
distnace_agent_entity.apply(
lambda x: typed_ready_agent.loc[x, 'data'].getDistanceFromCoordinate(
- coord
+ coord # noqa: B023
)
)
@@ -390,7 +390,7 @@ def perform_action_helper(
# distnace_agent_entity.loc[agent_name] = d_agent['data'].getDistanceFromCoordinate(coord)
# ---------------------------------
- distnace_agent_entity.sort_values(ascending=True, inplace=True)
+ distnace_agent_entity.sort_values(ascending=True, inplace=True) # noqa: PD002
if self.entity[entity] == 'PIPE':
orginal_element = entity_data.loc[node_name, 'Orginal_element']
else:
@@ -399,7 +399,7 @@ def perform_action_helper(
while len(distnace_agent_entity) > 0:
choosed_agent_name = distnace_agent_entity.index[0]
- if flag == False:
+ if flag == False: # noqa: E712
i_assigned, description, job_gross_time, collective = (
self.agents.assignsJobToAgent(
choosed_agent_name,
@@ -431,30 +431,30 @@ def perform_action_helper(
)
)
collective = None # Collective already assigned/Sina
- if i_assigned == False and description == 'ShortOfTime':
+ if i_assigned == False and description == 'ShortOfTime': # noqa: E712
distnace_agent_entity.pop(distnace_agent_entity.index[0])
break
- elif i_assigned == False and description == 'FASTCHECK':
+ elif i_assigned == False and description == 'FASTCHECK': # noqa: RET508, E712
self._registry.updateElementDamageTable(
self.entity[entity], action, node_name, 'NA', icheck=True
)
next_action = self.getNextSequence(self.entity[entity], action)
- if next_action != None:
+ if next_action != None: # noqa: E711
self._registry.updateElementDamageTable(
self.entity[entity],
next_action,
node_name,
- False,
+ False, # noqa: FBT003
icheck=True,
)
break
- elif i_assigned == False and description == 'SKIP':
+ elif i_assigned == False and description == 'SKIP': # noqa: E712
break
- elif i_assigned == True:
- if collective != None:
+ elif i_assigned == True: # noqa: E712
+ if collective != None: # noqa: E711
orginal_element = entity_data.loc[
node_name, 'Orginal_element'
]
@@ -515,10 +515,10 @@ def perform_action_helper(
'On_Going',
icheck=not flag,
)
- typed_ready_agent.drop(choosed_agent_name, inplace=True)
+ typed_ready_agent.drop(choosed_agent_name, inplace=True) # noqa: PD002
job_end_time = self.agents.getJobEndTime(choosed_agent_name)
- if job_end_time != None and description == 'INSIDE_SHIFT':
+ if job_end_time != None and description == 'INSIDE_SHIFT': # noqa: E711
modfied_end_time = self._addHardEvent(
job_end_time, 'agent', choosed_agent_name, stop_time
)
@@ -527,7 +527,7 @@ def perform_action_helper(
)
if (
- self._registry.isThereSuchOngoingLongJob(
+ self._registry.isThereSuchOngoingLongJob( # noqa: E712
node_name, action, entity
)
== True
@@ -542,7 +542,7 @@ def perform_action_helper(
break
- elif description == 'OUTSIDE_SHIFT':
+ elif description == 'OUTSIDE_SHIFT': # noqa: RET508
# logger.warning('cur_time= '+repr(stop_time)+', end_time= '+repr(stop_time+job_gross_time))
if not self._registry.isThereSuchOngoingLongJob(
node_name, action, entity
@@ -559,7 +559,7 @@ def perform_action_helper(
node_name, action, entity, choosed_agent_name
)
- end_shift_time = self.agents._agents.loc[
+ end_shift_time = self.agents._agents.loc[ # noqa: SLF001
choosed_agent_name, 'data'
].getAgentShiftEndTime(stop_time)
@@ -577,17 +577,17 @@ def perform_action_helper(
)
break
- elif job_end_time == None:
- raise ValueError('Job is not assigned to the agent')
+ elif job_end_time == None: # noqa: E711
+ raise ValueError('Job is not assigned to the agent') # noqa: EM101, TRY003
else:
raise ValueError('Unknown description: ' + description)
else:
- raise RuntimeError('i_assigned not boolean')
+ raise RuntimeError('i_assigned not boolean') # noqa: EM101, TRY003
# -----------------------------------------------------------
# self._registry.updatePipeDamageTableTimeSeries(stop_time)
- def assignVacantJob(
+ def assignVacantJob( # noqa: N802, D102
self,
vacant_job_list,
typed_ready_agent,
@@ -600,9 +600,9 @@ def assignVacantJob(
wn,
):
if not len(typed_ready_agent) > 0:
- raise RuntimeError(
+ raise RuntimeError( # noqa: TRY003
# JVM: Not sure what we're saying here.
- 'This should not happen. We have a condition before in perform action'
+ 'This should not happen. We have a condition before in perform action' # noqa: EM101
)
if not len(vacant_job_list) > 0:
return
@@ -624,7 +624,7 @@ def assignVacantJob(
flag=True,
)
- def applyEffect(
+ def applyEffect( # noqa: C901, N802, D102, PLR0912, PLR0915
self,
damage_node_name,
single_effect_data,
@@ -657,12 +657,12 @@ def applyEffect(
pattern_list = node.demand_timeseries_list.pattern_list()
default_pattern = wn.options.hydraulic.pattern
node_pattern_name = None
- if pattern_list[0] != None:
+ if pattern_list[0] != None: # noqa: E711
node_pattern_name = pattern_list[0].name
- elif pattern_list[0] == None and default_pattern != None:
+ elif pattern_list[0] == None and default_pattern != None: # noqa: E711
node_pattern_name = str(default_pattern)
- if node_pattern_name == None:
+ if node_pattern_name == None: # noqa: E711
multiplier = 1
else:
cur_pattern = wn.get_pattern(node_pattern_name)
@@ -691,8 +691,8 @@ def applyEffect(
elif element_type == 'PIPE':
leak_sum = 0
- pipe_damage_table = self._registry._pipe_damage_table
- pipe_break_history = self._registry._pipe_break_history
+ pipe_damage_table = self._registry._pipe_damage_table # noqa: SLF001
+ pipe_break_history = self._registry._pipe_break_history # noqa: SLF001
damage_type = pipe_damage_table.loc[damage_node_name, 'damage_type']
available_node_results = (
self._registry.result.node['demand'].loc[stop_time].dropna()
@@ -700,28 +700,28 @@ def applyEffect(
available_node_results = available_node_results.index
if damage_type == 'break':
if damage_node_name in pipe_damage_table.index:
- break_node_B = pipe_break_history.loc[
+ break_node_B = pipe_break_history.loc[ # noqa: N806
damage_node_name, 'Node_B'
]
if break_node_B in available_node_results:
- leak_beark_node_B = self._registry.result.node[
+ leak_beark_node_B = self._registry.result.node[ # noqa: N806
'demand'
].loc[stop_time, break_node_B]
else:
- leak_beark_node_B = 0
+ leak_beark_node_B = 0 # noqa: N806
leak_sum += leak_beark_node_B
else:
- break_node_A = (
+ break_node_A = ( # noqa: N806
pipe_break_history[
pipe_break_history['Node_B'] == damage_node_name
]
).iloc[0]['Node_A']
if break_node_A in available_node_results:
- leak_beark_node_A = self._registry.result.node[
+ leak_beark_node_A = self._registry.result.node[ # noqa: N806
'demand'
].loc[stop_time, break_node_A]
else:
- leak_beark_node_A = 0
+ leak_beark_node_A = 0 # noqa: N806
leak_sum += leak_beark_node_A
if damage_node_name in available_node_results:
@@ -730,7 +730,7 @@ def applyEffect(
]
leak_sum += leak_damaged_node
- self._registry._pipe_damage_table.loc[
+ self._registry._pipe_damage_table.loc[ # noqa: SLF001
damage_node_name, 'LeakAtCheck'
] = leak_sum
@@ -776,7 +776,7 @@ def applyEffect(
if 'PIPESIZE' in single_effect_data:
middle_pipe_size = single_effect_data['PIPESIZE']
elif 'CV' in single_effect_data:
- cv = single_effect_data['CV']
+ cv = single_effect_data['CV'] # noqa: F841
elif 'PUMP' in single_effect_data:
pump = {}
pump['POWER'] = single_effect_data['PUMP']
@@ -805,7 +805,7 @@ def applyEffect(
self.repair.removeLeak(damage_node_name, damage_type, wn)
elif effect_type == 'ISOLATE_DN':
- if 'FACTOR' in single_effect_data:
+ if 'FACTOR' in single_effect_data: # noqa: SIM401
factor = single_effect_data['FACTOR']
else:
factor = 1
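
SIM401, suppressed above, suggests collapsing a key-presence if/else that
only selects a default into dict.get. Assuming single_effect_data is a plain
dict, the lint-clean equivalent of the FACTOR branch is one line:

    factor = single_effect_data.get('FACTOR', 1)

Keeping the explicit branch (plus the noqa) preserves visual symmetry with
the surrounding elif chain, which is a defensible choice.
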
@@ -825,7 +825,7 @@ def applyEffect(
self.repair.removeDemand(real_node_name, factor, wn)
self.repair.removeExplicitNodalLeak(real_node_name, factor, wn)
else:
- raise ValueError('Unknown nodal damage method')
+ raise ValueError('Unknown nodal damage method') # noqa: EM101, TRY003
elif effect_type == 'REPAIR':
if element_type == 'PIPE':
@@ -835,16 +835,16 @@ def applyEffect(
self.repair.removePipeRepair(damage_node_name, wn, action)
self.repair.repairPipe(damage_node_name, damage_type, wn)
elif element_type == 'DISTNODE':
- if self._registry.settings['Virtual_node'] == True:
+ if self._registry.settings['Virtual_node'] == True: # noqa: E712
real_node_name = get_node_name(
damage_node_name,
- self._registry._node_damage_table,
+ self._registry._node_damage_table, # noqa: SLF001
)
- virtual_node_table = self._registry._node_damage_table[
- self._registry._node_damage_table['Orginal_element']
+ virtual_node_table = self._registry._node_damage_table[ # noqa: SLF001
+ self._registry._node_damage_table['Orginal_element'] # noqa: SLF001
== real_node_name
]
- temp = virtual_node_table[action] == True
+ temp = virtual_node_table[action] == True # noqa: E712
if temp.all():
self.repairDistNode(real_node_name, wn)
@@ -892,10 +892,10 @@ def applyEffect(
else:
raise ValueError('Unknown effect_type: ' + repr(effect_type))
- def repairDistNode(self, damage_node_name, wn):
+ def repairDistNode(self, damage_node_name, wn): # noqa: N802, D102
self.repair.removeNodeTemporaryRepair(damage_node_name, wn)
- def updateShifiting(self, time):
+ def updateShifiting(self, time): # noqa: N802
"""Updates shifting with the new time given
Parameters
@@ -907,12 +907,12 @@ def updateShifiting(self, time):
-------
None.
- """
- if type(time) != int and type(time) != float:
+ """ # noqa: D400, D401
+ if type(time) != int and type(time) != float: # noqa: E721
raise ValueError('Time must be integer not ' + str(type(time)))
time = int(time)
if time < 0:
- raise ValueError('Time must be bigger than zero')
+ raise ValueError('Time must be bigger than zero') # noqa: EM101, TRY003
next_shift_time = self.shifting.getNextShiftTime(time)
# logger.debug('next shitt time = ' + str(next_shift_time))
self._addHardEvent(int(next_shift_time), 'shift')
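
E721 flags direct type() comparisons such as the one in updateShifiting;
isinstance is the idiomatic test. The two are not interchangeable, though:
isinstance(True, int) is True, while type(True) != int filters bools out.
A sketch that keeps the stricter behavior of the original check (helper
name hypothetical):

    def _validate_time(time):
        # exclude bool explicitly: isinstance would accept it, type() did not
        if not isinstance(time, (int, float)) or isinstance(time, bool):
            raise ValueError('Time must be integer not ' + str(type(time)))
        return int(time)
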
@@ -920,7 +920,7 @@ def updateShifiting(self, time):
if 'shift' in self._hard_event_table['Requester'].loc[time]:
self.agents.setChangeShift(time, working_check=True)
- def updateAvailability(self, time):
+ def updateAvailability(self, time): # noqa: N802, D102
# SINA DELETET IT [URGENT]
# =============================================================================
# import pickle
@@ -945,9 +945,9 @@ def updateAvailability(self, time):
# #for agent_type in agent_type_list:
# return
# =============================================================================
- agent_type_list = self.agents._agents['type'].unique()
- availible_agent_table = self.agents._agents[
- self.agents._agents['available'].eq(True)
+ agent_type_list = self.agents._agents['type'].unique() # noqa: SLF001
+ availible_agent_table = self.agents._agents[ # noqa: SLF001
+ self.agents._agents['available'].eq(True) # noqa: FBT003, SLF001
]
for agent_type in agent_type_list:
if time == self.eq_time:
@@ -963,7 +963,7 @@ def updateAvailability(self, time):
]
availible_number = len(available_typed_table)
all_number = len(
- self.agents._agents[self.agents._agents['type'].eq(agent_type)]
+ self.agents._agents[self.agents._agents['type'].eq(agent_type)] # noqa: SLF001
)
new_availible_number = np.round(av_r * all_number) - availible_number
@@ -972,30 +972,30 @@ def updateAvailability(self, time):
available_typed_table.index.to_list(),
int(abs(new_availible_number)),
)
- self.agents._agents.loc[new_index_list, 'available'] = False
+ self.agents._agents.loc[new_index_list, 'available'] = False # noqa: SLF001
elif new_availible_number > 0:
- not_available_typed_table = self.agents._agents[
- (self.agents._agents['type'] == agent_type)
- & (self.agents._agents['available'] == False)
+ not_available_typed_table = self.agents._agents[ # noqa: SLF001
+ (self.agents._agents['type'] == agent_type) # noqa: SLF001
+ & (self.agents._agents['available'] == False) # noqa: SLF001, E712
]
new_index_list = random.sample(
not_available_typed_table.index.to_list(),
int(new_availible_number),
)
- self.agents._agents.loc[new_index_list, 'available'] = True
+ self.agents._agents.loc[new_index_list, 'available'] = True # noqa: SLF001
- def initializeActiveAgents(self, time):
- for name, data in self.agents._agents.iterrows():
+ def initializeActiveAgents(self, time): # noqa: N802, D102
+ for name, data in self.agents._agents.iterrows(): # noqa: B007, SLF001
agent = data['data']
if agent.isOnShift(time):
data['active'] = True
# data['ready'] = True
- def initializeReadyAgents(self):
- active_agents_name_list = self.agents._agents[
- self.agents._agents['active'].eq(True)
+ def initializeReadyAgents(self): # noqa: N802, D102
+ active_agents_name_list = self.agents._agents[ # noqa: SLF001
+ self.agents._agents['active'].eq(True) # noqa: FBT003, SLF001
].index
- self.agents._agents.loc[active_agents_name_list, 'ready'] = True
+ self.agents._agents.loc[active_agents_name_list, 'ready'] = True # noqa: SLF001
# for name, data in self.agents._agents.iterrows():
# f data['active'] == True:
# data['ready'] = True
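
The SLF001 suppressions in this stretch exist because the restoration model
reads and writes self.agents._agents, a private DataFrame of another object.
The patch keeps the coupling and silences the rule; the usual remedy, sketched
here for a hypothetical Agents class, is a public read-only accessor so
callers stop reaching for the underscore attribute:

    import pandas as pd

    class Agents:
        def __init__(self):
            self._agents = pd.DataFrame(
                columns=['type', 'active', 'ready', 'available']
            )

        @property
        def table(self):
            # public handle onto the agent table
            return self._agents
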
@@ -1004,14 +1004,14 @@ def initializeReadyAgents(self):
# ready_agents_name_list = self.agents._agents['ready'].eq(True).index
# self.agents._agents.loc[ready_agents_name_list, 'available'] = True
- def initializeEntities(self, WaterNetwork):
+ def initializeEntities(self, WaterNetwork): # noqa: N802, N803, D102
for entity, val in self.entity_rule.items():
element_type = self.entity[entity]
if element_type not in self.ELEMENTS:
- raise ValueError('Unknown Element type')
+ raise ValueError('Unknown Element type') # noqa: EM101, TRY003
if val[0][0] == 'ALL':
- self._registry.setDamageData(element_type, entity, True)
+ self._registry.setDamageData(element_type, entity, True) # noqa: FBT003
else:
res = []
node_res = []
@@ -1041,7 +1041,7 @@ def initializeEntities(self, WaterNetwork):
element_type,
node_res,
entity,
- True,
+ True, # noqa: FBT003
iCheck=True,
)
@@ -1049,14 +1049,14 @@ def initializeEntities(self, WaterNetwork):
element_type,
union_list,
entity,
- True,
+ True, # noqa: FBT003
)
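
FBT003 flags the bare booleans passed positionally to setDamageData: at the
call site nothing says what True means. No signature change is needed to fix
it, since passing the flag by keyword is enough, and 'value' is the parameter
name Registry.setDamageData actually uses (see registry.py further down).
A sketch of the keyword-only shape the rule family pushes toward:

    def set_damage_data(element, col, *, value):
        """Hypothetical keyword-only variant of Registry.setDamageData."""
        print(element, col, value)

    # the flag now reads as what it is at every call site:
    set_damage_data('DISTNODE', 'leak_entity', value=True)
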
- def removeRecordsWithoutEntities(self, element_type):
+ def removeRecordsWithoutEntities(self, element_type): # noqa: N802, D102
entity_list = []
for entity in self.entity:
if self.entity[entity] == element_type:
- entity_list.append(entity)
+ entity_list.append(entity) # noqa: PERF401
damage_table = self._registry.getDamageData(element_type, iCopy=False)
if len(entity_list) > 0:
@@ -1067,9 +1067,9 @@ def removeRecordsWithoutEntities(self, element_type):
].index.tolist()
else:
not_asigned_damaged_table = damage_table.index.to_list()
- damage_table.drop(not_asigned_damaged_table, inplace=True)
+ damage_table.drop(not_asigned_damaged_table, inplace=True) # noqa: PD002
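
PD002 discourages inplace=True because in-place pandas calls return None and
the option is being phased out of the API. Here the mutation looks deliberate:
damage_table came from getDamageData(..., iCopy=False), so dropping rows in
place edits the registry's own table. The lint-clean form returns a new frame
and would have to be written back, which is presumably why the noqa was chosen
over a rewrite; a sketch of the non-inplace idiom:

    # drop() without inplace returns a new DataFrame
    kept = damage_table.drop(not_asigned_damaged_table)
    # ...and the result would still need to be stored back into the registry
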
- def initializeGroups(self):
+ def initializeGroups(self): # noqa: N802, D102
for el in self.ELEMENTS:
group_name_list = []
@@ -1107,28 +1107,28 @@ def initializeGroups(self):
temp = temp[group_name_list]
temp_list = []
- for col_name, col in temp.iteritems():
+ for col_name, col in temp.iteritems(): # noqa: B007
not_na = col.notna()
- not_na = not_na[not_na == False]
+ not_na = not_na[not_na == False] # noqa: E712
temp_list.append(not_na.index.tolist())
temp_list = self._unionOfAll(temp_list)
if len(temp_list) > 0:
- print(
+ print( # noqa: T201
'In element: '
+ repr(el)
+ ', the following damaged locations does not have a assigned group and will not be affected in the course of restoration:\n'
+ repr(temp_list)
)
logger.warning(
- 'In element: '
+ 'In element: ' # noqa: G003
+ repr(el)
+ ', the following damaged locations does not have a assigned group and will not be affected in the course of restoration:\n'
+ repr(temp_list)
)
- def initializeGroups_old(self):
+ def initializeGroups_old(self): # noqa: N802, D102
for el in self.ELEMENTS:
group_name_list = []
@@ -1166,28 +1166,28 @@ def initializeGroups_old(self):
temp = temp[group_name_list]
temp_list = []
- for col_name, col in temp.iteritems():
+ for col_name, col in temp.iteritems(): # noqa: B007
not_na = col.notna()
- not_na = not_na[not_na == False]
+ not_na = not_na[not_na == False] # noqa: E712
temp_list.append(not_na.index.tolist())
temp_list = self._unionOfAll(temp_list)
if len(temp_list) > 0:
- print(
+ print( # noqa: T201
'In element: '
+ repr(el)
+ ', the following damaged locations does not have a assigned group and will not be affected in the course of restoration:\n'
+ repr(temp_list)
)
logger.warning(
- 'In element: '
+ 'In element: ' # noqa: G003
+ repr(el)
+ ', the following damaged locations does not have a assigned group and will not be affected in the course of restoration:\n'
+ repr(temp_list)
)
- def initializeNumberOfDamages(self):
+ def initializeNumberOfDamages(self): # noqa: N802, D102
for element_type in self.ELEMENTS:
if (
'Number_of_damages'
@@ -1199,17 +1199,17 @@ def initializeNumberOfDamages(self):
element_type, 'Number_of_damages', 1
)
- def _unionOfAll(self, in_list):
+ def _unionOfAll(self, in_list): # noqa: N802
num_of_lists = len(in_list)
if len(in_list) == 0:
return in_list
if len(in_list) == 1:
- if type(in_list[0]) == list:
+ if type(in_list[0]) == list: # noqa: E721
return in_list[0]
- else:
- raise ValueError('Something is wrong here')
+ else: # noqa: RET505
+ raise ValueError('Something is wrong here') # noqa: EM101, TRY003
first_list = in_list[0]
second_list = in_list[1]
@@ -1217,18 +1217,18 @@ def _unionOfAll(self, in_list):
for item in first_list:
if item in second_list:
- union_list.append(item)
+ union_list.append(item) # noqa: PERF401
- if num_of_lists == 2:
+ if num_of_lists == 2: # noqa: PLR2004
return union_list
- else:
+ else: # noqa: RET505
in_list.pop(0)
in_list[0] = union_list
return self._unionOfAll(in_list)
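
Two recurring suppressions meet in _unionOfAll: PERF401 (prefer a list
comprehension to append-in-a-loop) and RET505 (no else needed after a branch
that returns). One observation worth recording: despite its name, the helper
computes the pairwise intersection of the input lists. A behavior-preserving,
lint-clean sketch:

    def _union_of_all(in_list):
        # despite the original name, this is an intersection
        if len(in_list) <= 1:
            return in_list[0] if in_list else in_list
        first, second, *rest = in_list
        merged = [item for item in first if item in second]
        return _union_of_all([merged, *rest]) if rest else merged
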
- def _getRefinedElementList(
+ def _getRefinedElementList( # noqa: N802
self,
- WaterNetwork,
+ WaterNetwork, # noqa: N803
attribute,
condition,
condition_value,
@@ -1257,7 +1257,7 @@ def _getRefinedElementList(
return res, node_res
- def refineEntityDamageTable(
+ def refineEntityDamageTable( # noqa: N802, D102
self,
damaged_table,
group_name,
@@ -1266,7 +1266,7 @@ def refineEntityDamageTable(
):
ret = []
# logger.warning('Sina')
- if group_name == None:
+ if group_name == None: # noqa: E711
ret = damaged_table
# logger.warning('1')
@@ -1279,7 +1279,7 @@ def refineEntityDamageTable(
ret = damaged_table[damaged_table[group_name] == agent_group_tag]
if len(ret) == 0:
logger.warning(
- 'Empty damage table in element type='
+ 'Empty damage table in element type=' # noqa: G003
+ repr(element_type)
+ 'group name='
+ repr(group_name)
@@ -1291,17 +1291,17 @@ def refineEntityDamageTable(
return ret
- def _refine_table(self, table, attribute, condition, condition_value):
+ def _refine_table(self, table, attribute, condition, condition_value): # noqa: C901
refined_table = None
- if type(condition_value) == str:
+ if type(condition_value) == str: # noqa: E721
if condition == 'EQ':
refined_table = table[table[attribute] == condition_value]
elif condition == 'NOTEQ':
refined_table = table[table[attribute] != condition_value]
else:
raise ValueError('Undefined condition: ' + repr(condition))
- elif type(condition_value) == int or type(condition_value) == float:
+ elif type(condition_value) == int or type(condition_value) == float: # noqa: E721
if condition == 'EQ':
refined_table = table[table[attribute] == condition_value]
elif condition == 'BG-EQ':
@@ -1321,9 +1321,9 @@ def _refine_table(self, table, attribute, condition, condition_value):
return refined_table
- def _getRefinedNodeElementList(
+ def _getRefinedNodeElementList( # noqa: C901, N802
self,
- WaterNetwork,
+ WaterNetwork, # noqa: ARG002, N803
attribute,
condition,
condition_value,
@@ -1333,7 +1333,7 @@ def _getRefinedNodeElementList(
res = []
node_res = []
- if attribute == 'FILE' or attribute == 'NOT_IN_FILE':
+ if attribute == 'FILE' or attribute == 'NOT_IN_FILE': # noqa: PLR1714
node_damage_list = self._registry.getDamageData(element_type)
for org_file_name in condition_value:
@@ -1352,8 +1352,8 @@ def _getRefinedNodeElementList(
temp.index = temp['random_sina_index']
temp = temp.drop('random_sina_index', axis=1)
else:
- if type(org_file_name) == str:
- org_file_name = [org_file_name]
+ if type(org_file_name) == str: # noqa: E721
+ org_file_name = [org_file_name] # noqa: PLW2901
temp = node_damage_list.loc[org_file_name]
ichosen = False
@@ -1362,11 +1362,11 @@ def _getRefinedNodeElementList(
res.extend(temp.index.tolist())
ichosen = True
- if ichosen == False:
+ if ichosen == False: # noqa: E712
if org_file_name in wn.node_name_list:
ichosen = True
node_res.append(org_file_name)
- if ichosen == False:
+ if ichosen == False: # noqa: E712
raise ValueError(
'Element with ID: '
+ repr(org_file_name)
@@ -1397,9 +1397,9 @@ def _getRefinedNodeElementList(
return res, node_res
- def _getRefinedPumpList(
+ def _getRefinedPumpList( # noqa: N802
self,
- WaterNetwork,
+ WaterNetwork, # noqa: ARG002, N803
attribute,
condition,
condition_value,
@@ -1417,7 +1417,7 @@ def _getRefinedPumpList(
if len(temp) == 1:
element_res.append(temp.element_name[0])
elif len(temp) > 1:
- raise ValueError('Something wrong here')
+ raise ValueError('Something wrong here') # noqa: EM101, TRY003
if attribute == 'NOT_IN_FILE':
index_list = pump_damage_list.element_name.tolist()
@@ -1427,7 +1427,7 @@ def _getRefinedPumpList(
element_res = index_list
elif attribute in self._registry.getDamageData('PUMP').columns:
- temp = self._registry._pump_damage_table
+ temp = self._registry._pump_damage_table # noqa: SLF001
refined_table = self._refine_table(
temp, attribute, condition, condition_value
@@ -1450,9 +1450,9 @@ def _getRefinedPumpList(
res.append(temp)
return res
- def _getRefinedPipeList(
+ def _getRefinedPipeList( # noqa: C901, N802
self,
- WaterNetwork,
+ WaterNetwork, # noqa: N803
attribute,
condition,
condition_value,
@@ -1488,7 +1488,7 @@ def _getRefinedPipeList(
if pipe_value <= condition_value:
res.append(damage_name)
- elif attribute == 'FILE' or attribute == 'NOT_IN_FILE':
+ elif attribute == 'FILE' or attribute == 'NOT_IN_FILE': # noqa: PLR1714
pipe_damage_list = self._registry.getDamageData('PIPE')
for org_file_name in condition_value:
temp = pipe_damage_list[
@@ -1526,16 +1526,16 @@ def _getRefinedPipeList(
return res
- def _getReminderTime(self, name):
+ def _getReminderTime(self, name): # noqa: N802
return self._reminder_time_hard_event[name]
- def _saveReminderTime(self, time, name):
+ def _saveReminderTime(self, time, name): # noqa: N802
if name not in self._reminder_time_hard_event:
self._reminder_time_hard_event[name] = int(time)
else:
self._reminder_time_hard_event[name] += int(time)
- def _addHardEvent(self, next_time, requester, detail=None, current_time=None):
+ def _addHardEvent(self, next_time, requester, detail=None, current_time=None): # noqa: N802
"""Adds a hard event
Parameters
@@ -1549,24 +1549,24 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None):
-------
None.
- """
+ """ # noqa: D400, D401
time = int(next_time)
next_time = int(next_time)
- if type(next_time) != int and type(next_time) != float:
+ if type(next_time) != int and type(next_time) != float: # noqa: E721
raise ValueError('time must be int, not ' + str(type(next_time)))
- if detail != None and current_time == None:
- raise ValueError('When detail is provided, current time cannot be None')
+ if detail != None and current_time == None: # noqa: E711
+ raise ValueError('When detail is provided, current time cannot be None') # noqa: EM101, TRY003
minimum_time_devision = int(self._registry.settings['simulation_time_step'])
- if current_time != None:
+ if current_time != None: # noqa: E711
if next_time < current_time:
- raise ValueError('Time is smaller than current time')
- if detail == None:
- raise ValueError(
- 'When current time is provided, detail cannot be None'
+ raise ValueError('Time is smaller than current time') # noqa: EM101, TRY003
+ if detail == None: # noqa: E711
+ raise ValueError( # noqa: TRY003
+ 'When current time is provided, detail cannot be None' # noqa: EM101
)
if minimum_time_devision < 0:
- raise ValueError('Minimum time division cannot be negative')
+ raise ValueError('Minimum time division cannot be negative') # noqa: EM101, TRY003
name = requester + '-' + detail
@@ -1574,7 +1574,7 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None):
_b = np.round(time / minimum_time_devision)
- if abs(_b) < 0.01:
+ if abs(_b) < 0.01: # noqa: PLR2004
_b = 1
new_time = _b * minimum_time_devision
@@ -1594,7 +1594,7 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None):
]
elif (
requester in self._hard_event_table.loc[next_time, 'Requester']
- and detail == None
+ and detail == None # noqa: E711
):
pass
else:
@@ -1604,20 +1604,20 @@ def _addHardEvent(self, next_time, requester, detail=None, current_time=None):
return next_time
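
EM101 and TRY003, suppressed on most raise statements in this patch, ask for
two things: keep string literals out of the exception constructor (so the
traceback line does not duplicate the message) and move long messages into
dedicated exception types. The lint-clean shape is small; a sketch against
one of the checks above:

    def _check_detail(detail, current_time):
        if detail is not None and current_time is None:
            msg = 'When detail is provided, current time cannot be None'
            raise ValueError(msg)
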
- def _isHardEvent(self, time, requester=None):
- if requester == None:
+ def _isHardEvent(self, time, requester=None): # noqa: N802
+ if requester == None: # noqa: E711
return time in self._hard_event_table.index
- else:
+ else: # noqa: RET505
if time in self._hard_event_table.index:
req = self._hard_event_table.loc[time, 'Requester']
if requester in req:
return True
return False
- def getHardEventDetails(self, time, by=None):
- if by == None:
+ def getHardEventDetails(self, time, by=None): # noqa: N802, D102
+ if by == None: # noqa: E711
return self._hard_event_table.loc[time, 'Detail']
- elif by not in self._hard_event_table.loc[time, 'Requester']:
+ elif by not in self._hard_event_table.loc[time, 'Requester']: # noqa: RET505
return []
else:
res = []
@@ -1627,28 +1627,28 @@ def getHardEventDetails(self, time, by=None):
for requester in requester_list:
if requester == by:
res.append(detail_list[i])
- i += 1
+ i += 1 # noqa: SIM113
return res
- def getNewEventsTime(self, reset=False):
+ def getNewEventsTime(self, reset=False): # noqa: FBT002, N802, D102
new_event_table = self._hard_event_table[
- self._hard_event_table['New'] == True
+ self._hard_event_table['New'] == True # noqa: E712
]
new_event_table = new_event_table.sort_index()
- if reset == True:
- for ind, val in new_event_table.iterrows():
+ if reset == True: # noqa: E712
+ for ind, val in new_event_table.iterrows(): # noqa: B007
self._hard_event_table.loc[ind, 'New'] = False
return list(new_event_table.index)
- def unmarkNewEvents(self):
- self._hard_event_table['new'][self._hard_event_table['New'] == True] = False
+ def unmarkNewEvents(self): # noqa: N802, D102
+ self._hard_event_table['new'][self._hard_event_table['New'] == True] = False # noqa: E712
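
Worth flagging for a later pass, since the patch keeps the line verbatim:
unmarkNewEvents writes to a lowercase 'new' column while every other access
in the class uses 'New', and the chained df[col][mask] = ... assignment is
the exact pattern SettingWithCopyWarning guards against, so the write may
never reach the underlying frame. The presumably intended, lint-clean form:

    def unmarkNewEvents(self):  # noqa: N802
        # assumes the capitalized 'New' column used everywhere else
        self._hard_event_table.loc[self._hard_event_table['New'], 'New'] = False
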
- def getAllSequences(self, element_type):
+ def getAllSequences(self, element_type): # noqa: N802, D102
return self.sequence[element_type]
- def getNextSequence(self, element_type, cur_seq):
+ def getNextSequence(self, element_type, cur_seq): # noqa: N802, D102
seq_list = self.sequence[element_type]
if cur_seq not in seq_list:
raise ValueError('Sequence was not in sequence list: ' + str(cur_seq))
@@ -1659,10 +1659,10 @@ def getNextSequence(self, element_type, cur_seq):
i += 1
if not i + 1 < len(seq_list):
return None
- else:
+ else: # noqa: RET505
return seq_list[i + 1]
- def initialize(self, wn, stop_time, delay=0, earthquake=None):
+ def initialize(self, wn, stop_time, delay=0, earthquake=None): # noqa: C901, D102
self.if_initiated = True
self.eq_time = stop_time
if delay < 0:
@@ -1670,7 +1670,7 @@ def initialize(self, wn, stop_time, delay=0, earthquake=None):
self.delay = delay
if stop_time < 0:
- raise ValueError('Stop time is less than 0')
+ raise ValueError('Stop time is less than 0') # noqa: EM101, TRY003
# refined_pump = self.pump_restoration[self.pump_restoration['Restore_time']>=stop_time]
if not self.pump_restoration.empty:
@@ -1684,17 +1684,17 @@ def initialize(self, wn, stop_time, delay=0, earthquake=None):
)
for (
- ind,
+ ind, # noqa: B007
row,
- ) in self.pump_restoration.items():
+ ) in self.pump_restoration.items(): # noqa: PERF102
self._addHardEvent(row['Restore_time'], 'pump')
- if type(self.tank_restoration) != pd.core.series.Series:
- raise
+ if type(self.tank_restoration) != pd.core.series.Series: # noqa: E721
+ raise # noqa: PLE0704
for (
- ind,
+ ind, # noqa: B007
row,
- ) in self.tank_restoration.items():
+ ) in self.tank_restoration.items(): # noqa: PERF102
self._addHardEvent(row['Restore_time'], 'tank')
self.restoration_start_time = stop_time + delay
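
PLE0704 marks the bare raise above: outside an except block there is no
active exception to re-raise, so reaching that line aborts with
"RuntimeError: No active exception to re-raise". It reads like a placeholder
guard; a sketch that fails with an explicit error instead (wording is an
assumption, not the author's):

    if not isinstance(self.tank_restoration, pd.Series):
        msg = 'tank_restoration must be a pandas Series, got ' + str(
            type(self.tank_restoration)
        )
        raise TypeError(msg)
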
@@ -1722,43 +1722,43 @@ def initialize(self, wn, stop_time, delay=0, earthquake=None):
self.removeRecordsWithoutEntities('GNODE')
for el in self.ELEMENTS:
- self._registry.setDamageData(el, 'discovered', False)
+ self._registry.setDamageData(el, 'discovered', False) # noqa: FBT003
self.initializeGroups()
self.initializeNumberOfDamages()
for seq_key, seq_list in self.sequence.items():
- self._registry.setDamageData(seq_key, seq_list[0], False)
+ self._registry.setDamageData(seq_key, seq_list[0], False) # noqa: FBT003
if self.delay == 0:
event_time_list = self.perform_action(wn, stop_time)
else:
event_time_list = self.getNewEventsTime(reset=True)
- if earthquake != None:
+ if earthquake != None: # noqa: E711
self.earthquake = earthquake
event_time_list = event_time_list[1:]
- return event_time_list
+ return event_time_list # noqa: RET504
- def iRestorationStopTime(self):
- if self.if_initiated == False:
+ def iRestorationStopTime(self): # noqa: N802, D102
+ if self.if_initiated == False: # noqa: E712
return False
logger.debug('Func: node functionality')
pipe_damage_end = self.iAllPipeLastActionDone()
node_damage_end = self.iAllNodeLastActionDone()
pump_damage_end = self.iAllPumpLastActionDone()
- GNODE_damage_end = self.iAllGNodeLastActionDone()
+ GNODE_damage_end = self.iAllGNodeLastActionDone() # noqa: N806
tank_damage_end = self.iAllTankLastActionDone()
reservoir_damage_end = self.iAllReservoirLastActionDone()
- logger.debug('pipe: ' + repr(pipe_damage_end))
- logger.debug('node: ' + repr(node_damage_end))
- logger.debug('pump: ' + repr(pump_damage_end))
- logger.debug('GNODE: ' + repr(GNODE_damage_end))
- logger.debug('tank: ' + repr(tank_damage_end))
- logger.debug('reservoir: ' + repr(reservoir_damage_end))
+ logger.debug('pipe: ' + repr(pipe_damage_end)) # noqa: G003
+ logger.debug('node: ' + repr(node_damage_end)) # noqa: G003
+ logger.debug('pump: ' + repr(pump_damage_end)) # noqa: G003
+ logger.debug('GNODE: ' + repr(GNODE_damage_end)) # noqa: G003
+ logger.debug('tank: ' + repr(tank_damage_end)) # noqa: G003
+ logger.debug('reservoir: ' + repr(reservoir_damage_end)) # noqa: G003
- if (
+ if ( # noqa: SIM103
pipe_damage_end
and node_damage_end
and pump_damage_end
@@ -1767,126 +1767,126 @@ def iRestorationStopTime(self):
and reservoir_damage_end
):
return True
- else:
+ else: # noqa: RET505
return False
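
SIM103, suppressed on the if above, points out that an
"if cond: return True / else: return False" tail can return the condition
directly. For iRestorationStopTime that is:

    return (
        pipe_damage_end
        and node_damage_end
        and pump_damage_end
        and GNODE_damage_end
        and tank_damage_end
        and reservoir_damage_end
    )
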
- def iAllPipeLastActionDone(self):
- print()
+ def iAllPipeLastActionDone(self): # noqa: N802, D102
+ print() # noqa: T201
if 'PIPE' in self.sequence:
- if len(self._registry._pipe_damage_table) == 0:
+ if len(self._registry._pipe_damage_table) == 0: # noqa: SLF001
return True
pipe_action = self.sequence['PIPE'][-1]
- pipe_last_action_values = self._registry._pipe_damage_table[pipe_action]
+ pipe_last_action_values = self._registry._pipe_damage_table[pipe_action] # noqa: SLF001
if_pipe_last_action_true = (
pipe_last_action_values
== True | (pipe_last_action_values == 'Collective')
).all()
- if if_pipe_last_action_true:
+ if if_pipe_last_action_true: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
- def iAllNodeLastActionDone(self):
+ def iAllNodeLastActionDone(self): # noqa: N802, D102
if 'DISTNODE' in self.sequence:
- if len(self._registry._node_damage_table) == 0:
+ if len(self._registry._node_damage_table) == 0: # noqa: SLF001
return True
node_action = self.sequence['DISTNODE'][-1]
- node_last_action_values = self._registry._node_damage_table[node_action]
+ node_last_action_values = self._registry._node_damage_table[node_action] # noqa: SLF001
if_node_last_action_true = (
node_last_action_values
== True | (node_last_action_values == 'Collective')
).all()
- if if_node_last_action_true == True:
+ if if_node_last_action_true == True: # noqa: SIM103, E712
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
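
A latent bug worth recording, since the patch only annotates these lines: in
both iAllPipeLastActionDone and iAllNodeLastActionDone the test

    values == True | (values == 'Collective')

parses as values == (True | (values == 'Collective')) because | binds more
tightly than ==. True OR-ed with a boolean Series is all True, so the whole
expression collapses to values == True, and rows marked 'Collective' are
treated as unfinished. The apparently intended, parenthesized form:

    if_last_action_true = (
        (last_action_values == True)  # noqa: E712  element-wise on purpose
        | (last_action_values == 'Collective')
    ).all()
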
- def iAllPumpLastActionDone(self):
+ def iAllPumpLastActionDone(self): # noqa: N802, D102
if 'PUMP' in self.sequence:
- if len(self._registry._pump_damage_table) == 0:
+ if len(self._registry._pump_damage_table) == 0: # noqa: SLF001
return True
pump_action = self.sequence['PUMP'][-1]
- pump_last_action_values = self._registry._pump_damage_table[pump_action]
+ pump_last_action_values = self._registry._pump_damage_table[pump_action] # noqa: SLF001
- if len(self._registry._pump_damage_table) == 0:
+ if len(self._registry._pump_damage_table) == 0: # noqa: SLF001
return True
- if_pump_last_action_true = (pump_last_action_values == True).all()
+ if_pump_last_action_true = (pump_last_action_values == True).all() # noqa: E712
- if if_pump_last_action_true == True:
+ if if_pump_last_action_true == True: # noqa: SIM103, E712
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
- def iAllGNodeLastActionDone(self):
+ def iAllGNodeLastActionDone(self): # noqa: N802, D102
if 'GNODE' in self.sequence:
- if len(self._registry._gnode_damage_table) == 0:
+ if len(self._registry._gnode_damage_table) == 0: # noqa: SLF001
return True
gnode_action = self.sequence['GNODE'][-1]
- gnode_last_action_values = self._registry._gnode_damage_table[
+ gnode_last_action_values = self._registry._gnode_damage_table[ # noqa: SLF001
gnode_action
]
- if_gnode_last_action_true = (gnode_last_action_values == True).all()
+ if_gnode_last_action_true = (gnode_last_action_values == True).all() # noqa: E712
- if if_gnode_last_action_true == True:
+ if if_gnode_last_action_true == True: # noqa: SIM103, E712
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
- def iAllTankLastActionDone(self):
+ def iAllTankLastActionDone(self): # noqa: N802, D102
if 'TANK' in self.sequence:
- if len(self._registry._tank_damage_table) == 0:
+ if len(self._registry._tank_damage_table) == 0: # noqa: SLF001
return True
tank_action = self.sequence['TANK'][-1]
- tank_last_action_values = self._registry._tank_damage_table[tank_action]
- if_tank_last_action_true = (tank_last_action_values == True).all()
+ tank_last_action_values = self._registry._tank_damage_table[tank_action] # noqa: SLF001
+ if_tank_last_action_true = (tank_last_action_values == True).all() # noqa: E712
- if if_tank_last_action_true == True:
+ if if_tank_last_action_true == True: # noqa: SIM103, E712
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
- def iAllReservoirLastActionDone(self):
+ def iAllReservoirLastActionDone(self): # noqa: N802, D102
if 'RESERVOIR' in self.sequence:
- if len(self._registry._reservoir_damage_table) == 0:
+ if len(self._registry._reservoir_damage_table) == 0: # noqa: SLF001
return True
reservoir_action = self.sequence['RESERVOIR'][-1]
- reservoir_last_action_values = self._registry._reservoir_damage_table[
+ reservoir_last_action_values = self._registry._reservoir_damage_table[ # noqa: SLF001
reservoir_action
]
if_reservoir_last_action_true = (
- reservoir_last_action_values == True
+ reservoir_last_action_values == True # noqa: E712
).all()
- if if_reservoir_last_action_true == True:
+ if if_reservoir_last_action_true == True: # noqa: SIM103, E712
return True
- else:
+ else: # noqa: RET505
return False
else:
return True
- def getHydSigPipeList(self):
+ def getHydSigPipeList(self): # noqa: N802, D102
damage_group_list = self.priority.getHydSigDamageGroups()
pipe_damage_group_list = [
cur_damage_group
for cur_damage_group in damage_group_list
if self.entity[cur_damage_group] == 'PIPE'
]
- return pipe_damage_group_list
+ return pipe_damage_group_list # noqa: RET504
diff --git a/modules/systemPerformance/REWET/REWET/restoration/registry.py b/modules/systemPerformance/REWET/REWET/restoration/registry.py
index 65d296ba5..7e0c87119 100644
--- a/modules/systemPerformance/REWET/REWET/restoration/registry.py
+++ b/modules/systemPerformance/REWET/REWET/restoration/registry.py
@@ -1,7 +1,7 @@
"""Created on Sat Dec 26 03:22:21 2020
@author: snaeimi
-"""
+""" # noqa: INP001, D400
import logging
from collections import OrderedDict
@@ -13,8 +13,8 @@
logger = logging.getLogger(__name__)
-class Registry:
- def __init__(self, WaterNetwork, settings, demand_node_name_list, scenario_name):
+class Registry: # noqa: D101
+ def __init__(self, WaterNetwork, settings, demand_node_name_list, scenario_name): # noqa: N803
self._registry_version = 0.15
self.wn = WaterNetwork
self.settings = settings
@@ -89,7 +89,7 @@ def __init__(self, WaterNetwork, settings, demand_node_name_list, scenario_name)
self._pipe_data.loc[name] = [pipe.diameter]
for node_name, node in WaterNetwork.junctions():
- if node.demand_timeseries_list[0].base_value > 0.00000008:
+ if node.demand_timeseries_list[0].base_value > 0.00000008: # noqa: PLR2004
self.demand_node_name_list.append(node_name)
# for demand_node_name in self.demand_node_name_list:
@@ -139,8 +139,8 @@ def __init__(self, WaterNetwork, settings, demand_node_name_list, scenario_name)
# self._restoration_table = self._restoration_table.append(temp, ignore_index=True)
# =============================================================================
- def addRestorationDataOnPipe(self, damage_node_name, time, state):
- if self.settings['dmg_rst_data_save'] == True:
+ def addRestorationDataOnPipe(self, damage_node_name, time, state): # noqa: N802, D102
+ if self.settings['dmg_rst_data_save'] == True: # noqa: E712
orginal_pipe_name = self._pipe_damage_table.loc[
damage_node_name, 'Orginal_element'
]
@@ -152,7 +152,7 @@ def addRestorationDataOnPipe(self, damage_node_name, time, state):
}
self.Pipe_Damage_restoration_report.append(temp_row)
- def addEquavalantDamageHistory(
+ def addEquavalantDamageHistory( # noqa: N802, D102
self,
node_name,
new_node_name,
@@ -161,7 +161,7 @@ def addEquavalantDamageHistory(
number_of_damages,
):
if node_name in self.ED_history:
- raise ValueError('Node_damage already in history')
+ raise ValueError('Node_damage already in history') # noqa: EM101, TRY003
self.ED_history.loc[node_name] = {
'new_node_name': new_node_name,
@@ -171,18 +171,18 @@ def addEquavalantDamageHistory(
'current_number_of_damage': number_of_damages,
}
- def getEquavalantDamageHistory(self, node_name):
+ def getEquavalantDamageHistory(self, node_name): # noqa: N802, D102
temp = self.ED_history[node_name]
- if type(temp) != dict:
+ if type(temp) != dict: # noqa: E721
raise ValueError('probably two damages with the same name: ' + node_name)
return temp
- def removeEquavalantDamageHistory(self, node_name):
- self.ED_history.drop(node_name, inplace=True)
+ def removeEquavalantDamageHistory(self, node_name): # noqa: N802, D102
+ self.ED_history.drop(node_name, inplace=True) # noqa: PD002
- def isThereSuchOngoingLongJob(self, damaged_node_name, action, entity):
+ def isThereSuchOngoingLongJob(self, damaged_node_name, action, entity): # noqa: N802, D102
data = self._long_task_data
temp = data[['Node_name', 'Action', 'Entity']] == [
damaged_node_name,
@@ -192,16 +192,16 @@ def isThereSuchOngoingLongJob(self, damaged_node_name, action, entity):
temp = data[temp.all(1)]
if len(temp) > 1:
- raise ValueError('More job than 1 in long jobs')
- elif len(temp) == 1:
- if abs(temp['Time'].iloc[0]) < 0.01:
- raise ValueError('Something Wrong')
- else:
+ raise ValueError('More job than 1 in long jobs') # noqa: EM101, TRY003
+ elif len(temp) == 1: # noqa: RET506
+ if abs(temp['Time'].iloc[0]) < 0.01: # noqa: PLR2004
+ raise ValueError('Something Wrong') # noqa: EM101, TRY003
+ else: # noqa: RET506
return True
else:
return False
- def addLongJob(
+ def addLongJob( # noqa: N802, D102
self,
damaged_node_name,
action,
@@ -236,7 +236,7 @@ def addLongJob(
)
self._long_task_data = data.append(temp, ignore_index=True)
- def assignAgenttoLongJob(
+ def assignAgenttoLongJob( # noqa: N802, D102
self,
damaged_node_name,
action,
@@ -263,8 +263,8 @@ def assignAgenttoLongJob(
ind = temp.index[0]
if (
- self._long_task_data.loc[ind, 'cur_agent_name'] != None
- and choosed_agent_name != None
+ self._long_task_data.loc[ind, 'cur_agent_name'] != None # noqa: E711
+ and choosed_agent_name != None # noqa: E711
):
raise ValueError(
'Already someone is here '
@@ -273,7 +273,7 @@ def assignAgenttoLongJob(
self._long_task_data.loc[ind, 'cur_agent_name'] = choosed_agent_name
- def deductLongJobTime(self, damaged_node_name, action, entity, deduced_time):
+ def deductLongJobTime(self, damaged_node_name, action, entity, deduced_time): # noqa: N802, D102
deduced_time = int(deduced_time)
if deduced_time < 0:
@@ -299,7 +299,7 @@ def deductLongJobTime(self, damaged_node_name, action, entity, deduced_time):
+ ', '
+ entity
)
- elif len(temp) > 1:
+ elif len(temp) > 1: # noqa: RET506
raise ValueError(
'There are MORE THAN ONE long task defined for: '
+ damaged_node_name
@@ -313,7 +313,7 @@ def deductLongJobTime(self, damaged_node_name, action, entity, deduced_time):
if (self._long_task_data.loc[ind, 'Time'] - deduced_time) < 0:
logger.warning(
- damaged_node_name
+ damaged_node_name # noqa: G003
+ ', '
+ action
+ ', '
@@ -325,11 +325,11 @@ def deductLongJobTime(self, damaged_node_name, action, entity, deduced_time):
+ ', '
+ str(self._long_task_data.loc[ind, 'Time'] - deduced_time)
)
- raise ValueError('Zero reminded time for long task')
+ raise ValueError('Zero reminded time for long task') # noqa: EM101, TRY003
self._long_task_data.loc[ind, 'Time'] -= deduced_time
- def getLongJobRemindedTime(self, damaged_node_name, action, entity):
+ def getLongJobRemindedTime(self, damaged_node_name, action, entity): # noqa: N802, D102
data = self._long_task_data
temp = data[['Node_name', 'Action', 'Entity']] == [
damaged_node_name,
@@ -348,7 +348,7 @@ def getLongJobRemindedTime(self, damaged_node_name, action, entity):
+ ','
+ entity
)
- elif len(temp) > 1:
+ elif len(temp) > 1: # noqa: RET506
raise ValueError(
'There are MORE THAN ONE long task defined for: '
+ damaged_node_name
@@ -360,30 +360,30 @@ def getLongJobRemindedTime(self, damaged_node_name, action, entity):
return temp['Time'].iloc[0]
- def getVacantOnGoingJobs(self, action, entity):
+ def getVacantOnGoingJobs(self, action, entity): # noqa: N802, D102
res = []
data = self._long_task_data
temp = data[['Action', 'Entity']] == [action, entity]
temp = data[temp.all(1)]
- for ind, data in temp.iterrows():
- if data['cur_agent_name'] == None:
+ for ind, data in temp.iterrows(): # noqa: B007
+ if data['cur_agent_name'] == None: # noqa: E711
res.append(data['Node_name'])
return res
- def getdamagedNodesOfPipes(self, damage_type):
- if damage_type != 'break' and damage_type != 'leak':
- raise ValueError('The damage for pipe is either break or leak.')
+ def getdamagedNodesOfPipes(self, damage_type): # noqa: N802, D102
+ if damage_type != 'break' and damage_type != 'leak': # noqa: PLR1714
+ raise ValueError('The damage for pipe is either break or leak.') # noqa: EM101, TRY003
- if damage_type == 'break':
+ if damage_type == 'break': # noqa: RET503
return self._pipe_break_history[['Node_A', 'Node_B']]
- elif damage_type == 'leak':
+ elif damage_type == 'leak': # noqa: RET505
return self._pipe_leak_history['Node_name']
- def removeLongJob(self, damaged_node_name, action, entity):
+ def removeLongJob(self, damaged_node_name, action, entity): # noqa: N802, D102
data = self._long_task_data
temp = data[['Node_name', 'Action', 'Entity']] == [
damaged_node_name,
@@ -402,7 +402,7 @@ def removeLongJob(self, damaged_node_name, action, entity):
+ ','
+ entity
)
- elif len(temp) > 1:
+ elif len(temp) > 1: # noqa: RET506
raise ValueError(
'There are MORE THAN ONE long task defined for: '
+ damaged_node_name
@@ -414,9 +414,9 @@ def removeLongJob(self, damaged_node_name, action, entity):
ind = temp.index[0]
- self._long_task_data.drop(ind, inplace=True)
+ self._long_task_data.drop(ind, inplace=True) # noqa: PD002
- def addFunctionDataToRestorationRegistry(
+ def addFunctionDataToRestorationRegistry( # noqa: N802, D102
self,
damaged_node_name,
history,
@@ -448,10 +448,10 @@ def addFunctionDataToRestorationRegistry(
temp, ignore_index=True
)
- def addNodalDamage(self, nodal_damage, new_pipe_name_list):
- if self.settings['Virtual_node'] == True:
+ def addNodalDamage(self, nodal_damage, new_pipe_name_list): # noqa: N802, D102
+ if self.settings['Virtual_node'] == True: # noqa: E712
for ind, val in nodal_damage.items():
- val = int(val)
+ val = int(val) # noqa: PLW2901
virtual_node_name_list = []
for i in range(val):
new_virtual_node_name = ind + '_vir_' + str(i)
@@ -483,15 +483,15 @@ def addNodalDamage(self, nodal_damage, new_pipe_name_list):
'number_of_damages': val,
}
- def isVirtualNodeDamaged(self, virtual_node_name):
+ def isVirtualNodeDamaged(self, virtual_node_name): # noqa: N802, D102
return self.virtual_node_data[virtual_node_name]['is_damaged']
- def setVirtualNodeRepaired(self, virtual_node_name):
+ def setVirtualNodeRepaired(self, virtual_node_name): # noqa: N802, D102
self.virtual_node_data[virtual_node_name]['is_damaged'] = False
- def addNodalDemandChange(self, node_name, demand1, demand2):
+ def addNodalDemandChange(self, node_name, demand1, demand2): # noqa: N802, D102
# if self.settings['Virtual_node'] == False:
- if type(node_name) == str:
+ if type(node_name) == str: # noqa: E721
if node_name not in self._node_damage_table.index:
raise ValueError(repr(node_name) + ' is not in the node table')
self._node_damage_table.loc[node_name, 'Demand1'] = demand1
@@ -501,7 +501,7 @@ def addNodalDemandChange(self, node_name, demand1, demand2):
# self._node_damage_table.loc[node_name_vir, 'Demand1'] = demand1
# self._node_damage_table.loc[node_name_vir, 'Demand2'] = demand2
- def addPipeDamageToRegistry(self, node_name, data):
+ def addPipeDamageToRegistry(self, node_name, data): # noqa: N802
"""Adds damage to pipe registry
Parameters
@@ -515,13 +515,13 @@ def addPipeDamageToRegistry(self, node_name, data):
-------
None.
- """
+ """ # noqa: D400, D401
# self._pipe_node_damage_status[name] = data
- leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[
+ leaking_pipe_with_pipeA_orginal_pipe = self._pipe_leak_history[ # noqa: N806
self._pipe_leak_history.loc[:, 'Pipe_A'] == data['orginal_pipe']
]
- breaking_pipe_with_pipeA_orginal_pipe = self._pipe_break_history[
+ breaking_pipe_with_pipeA_orginal_pipe = self._pipe_break_history[ # noqa: N806
self._pipe_break_history.loc[:, 'Pipe_A'] == data['orginal_pipe']
]
@@ -529,8 +529,8 @@ def addPipeDamageToRegistry(self, node_name, data):
i_break_not_zero_length = len(breaking_pipe_with_pipeA_orginal_pipe) > 0
if i_leak_not_zero_length and i_break_not_zero_length:
- raise ValueError(
- 'There are more than 1 damage with original pipe name in pipe A. it does not make sense'
+ raise ValueError( # noqa: TRY003
+ 'There are more than 1 damage with original pipe name in pipe A. it does not make sense' # noqa: EM101
)
if i_leak_not_zero_length:
temp_node_name = leaking_pipe_with_pipeA_orginal_pipe.index[0]
@@ -582,27 +582,27 @@ def addPipeDamageToRegistry(self, node_name, data):
self._pipe_break_history.loc[node_name, 'Node_B'] = data['node_B']
else:
- raise ValueError('Undefined damage type')
+ raise ValueError('Undefined damage type') # noqa: EM101, TRY003
- def addGeneralNodeDamageToRegistry(self, node_name, data=None):
+ def addGeneralNodeDamageToRegistry(self, node_name, data=None): # noqa: ARG002, N802, D102
self._gnode_damage_table.loc[node_name, 'damage_type'] = None
- def addTankDamageToRegistry(self, node_name, data=None):
+ def addTankDamageToRegistry(self, node_name, data=None): # noqa: ARG002, N802, D102
self._tank_damage_table.loc[node_name, 'damage_type'] = None
- def addPumpDamageToRegistry(self, pump_name, data):
+ def addPumpDamageToRegistry(self, pump_name, data): # noqa: N802, D102
node_name = data.start_node.name
self._pump_damage_table.loc[node_name, 'damage_type'] = None
self._pump_damage_table.loc[node_name, 'element_name'] = pump_name
self._pump_damage_table.loc[node_name, 'start_node'] = data.start_node.name
self._pump_damage_table.loc[node_name, 'end_node'] = data.end_node.name
- def addReservoirDamageToRegistry(self, node_name, data=None):
+ def addReservoirDamageToRegistry(self, node_name, data=None): # noqa: ARG002, N802, D102
self._reservoir_damage_table.loc[node_name, 'damage_type'] = None
# def assignAgentToDamage(self, element, node_name, choosed_agent_name):
- def getListAllElementOrginalName(self, element_type):
+ def getListAllElementOrginalName(self, element_type): # noqa: N802, D102
original_element_list = None
if element_type == 'PIPE':
original_element_list = self._pipe_damage_table['Orginal_element']
@@ -611,7 +611,7 @@ def getListAllElementOrginalName(self, element_type):
original_element_list = self._pump_damage_table['element_name']
elif (
- element_type == 'DISTNODE'
+ element_type == 'DISTNODE' # noqa: PLR1714
or element_type == 'GNODE'
or element_type == 'TANK'
or element_type == 'RESERVOIR'
@@ -629,11 +629,11 @@ def getListAllElementOrginalName(self, element_type):
return original_element_list
- def getDamagedLocationListByOriginalElementList(
+ def getDamagedLocationListByOriginalElementList( # noqa: N802, D102
self,
element_type,
orginal_element_list,
- iCheck=False,
+ iCheck=False, # noqa: FBT002, N803
):
res = pd.Series()
@@ -644,7 +644,7 @@ def getDamagedLocationListByOriginalElementList(
original_element_list = self._pump_damage_table['element_name']
elif (
- element_type == 'DISTNODE'
+ element_type == 'DISTNODE' # noqa: PLR1714
or element_type == 'GNODE'
or element_type == 'TANK'
or element_type == 'RESERVOIR'
@@ -655,7 +655,7 @@ def getDamagedLocationListByOriginalElementList(
else:
raise ValueError('Unkown recognized element type: ' + repr(element_type))
- for element_name, group_tag in orginal_element_list.iteritems():
+ for element_name, group_tag in orginal_element_list.iteritems(): # noqa: B007
temp = original_element_list[original_element_list == element_name]
# if len(temp)!=1:
@@ -673,11 +673,11 @@ def getDamagedLocationListByOriginalElementList(
return res
- def getDamagedLocationListByOriginalElementList_2(
+ def getDamagedLocationListByOriginalElementList_2( # noqa: N802, D102
self,
element_type,
orginal_element_list,
- iCheck=False,
+ iCheck=False, # noqa: FBT002, N803
):
if element_type == 'PIPE':
all_original_element_list = self._pipe_damage_table['Orginal_element']
@@ -686,7 +686,7 @@ def getDamagedLocationListByOriginalElementList_2(
all_original_element_list = self._pump_damage_table['element_name']
elif (
- element_type == 'DISTNODE'
+ element_type == 'DISTNODE' # noqa: PLR1714
or element_type == 'GNODE'
or element_type == 'TANK'
or element_type == 'RESERVOIR'
@@ -701,7 +701,7 @@ def getDamagedLocationListByOriginalElementList_2(
raise ValueError('Unkown recognized element type: ' + repr(element_type))
temp_bool = all_original_element_list.isin(orginal_element_list.index)
res = all_original_element_list[temp_bool]
- if iCheck == True:
+ if iCheck == True: # noqa: E712
if len(res.index) < len(orginal_element_list):
not_available_list = set(orginal_element_list) - set(res.index)
raise ValueError(
@@ -713,30 +713,30 @@ def getDamagedLocationListByOriginalElementList_2(
return res
- def getOriginalPipenodes(self, orginal_pipe_name):
+ def getOriginalPipenodes(self, orginal_pipe_name): # noqa: N802, D102
return self.original_pipe_data[orginal_pipe_name]
- def getLeakData(self, leaking_node_name):
- pipe_A = self._pipe_leak_history.loc[leaking_node_name, 'Pipe_A']
- pipe_B = self._pipe_leak_history.loc[leaking_node_name, 'Pipe_B']
+ def getLeakData(self, leaking_node_name): # noqa: N802, D102
+ pipe_A = self._pipe_leak_history.loc[leaking_node_name, 'Pipe_A'] # noqa: N806
+ pipe_B = self._pipe_leak_history.loc[leaking_node_name, 'Pipe_B'] # noqa: N806
orginal_pipe = self._pipe_leak_history.loc[leaking_node_name, 'Orginal_pipe']
return pipe_A, pipe_B, orginal_pipe
- def getCertainLeakData(self, damage_node_name, wn):
+ def getCertainLeakData(self, damage_node_name, wn): # noqa: C901, N802, D102
pipe_name_list = []
- result_pipe_A = None
- result_pipe_B = None
+ result_pipe_A = None # noqa: N806
+ result_pipe_B = None # noqa: N806
orginal_pipe = self._pipe_leak_history.loc[damage_node_name, 'Orginal_pipe']
refined_data = self._pipe_leak_history[
self._pipe_leak_history['Orginal_pipe'] == orginal_pipe
]
- for damage_point_name, data in refined_data.iterrows():
- pipe_A = data['Pipe_A']
- pipe_B = data['Pipe_B']
+ for damage_point_name, data in refined_data.iterrows(): # noqa: B007
+ pipe_A = data['Pipe_A'] # noqa: N806
+ pipe_B = data['Pipe_B'] # noqa: N806
if pipe_A not in pipe_name_list:
pipe_name_list.append(pipe_A)
@@ -748,9 +748,9 @@ def getCertainLeakData(self, damage_node_name, wn):
self._pipe_break_history['Orginal_pipe'] == orginal_pipe
]
- for damage_point_name, data in refined_data.iterrows():
- pipe_A = data['Pipe_A']
- pipe_B = data['Pipe_B']
+ for damage_point_name, data in refined_data.iterrows(): # noqa: B007
+ pipe_A = data['Pipe_A'] # noqa: N806
+ pipe_B = data['Pipe_B'] # noqa: N806
if pipe_A not in pipe_name_list:
pipe_name_list.append(pipe_A)
@@ -760,39 +760,39 @@ def getCertainLeakData(self, damage_node_name, wn):
for pipe_name in pipe_name_list:
try:
pipe = wn.get_link(pipe_name)
- except:
+ except: # noqa: S112, E722
continue
if damage_node_name == pipe.start_node_name:
- result_pipe_B = pipe_name
+ result_pipe_B = pipe_name # noqa: N806
elif damage_node_name == pipe.end_node_name:
- result_pipe_A = pipe_name
+ result_pipe_A = pipe_name # noqa: N806
- if result_pipe_A != None and result_pipe_B != None:
+ if result_pipe_A != None and result_pipe_B != None: # noqa: E711
return result_pipe_A, result_pipe_B
raise RuntimeError(
'There must be a pair of pipes for ' + repr(damage_node_name)
)
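
E722/S112 flag the bare "except: continue" around wn.get_link: a bare except
also swallows KeyboardInterrupt and SystemExit, and silently skipping on any
error can hide real failures. A narrower sketch, on the assumption that
wntr's get_link raises KeyError for unknown link names:

    for pipe_name in pipe_name_list:
        try:
            pipe = wn.get_link(pipe_name)
        except KeyError:  # assumption: unknown link names raise KeyError
            continue
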
- def getBreakData(self, breaking_node_name):
- pipe_A = self._pipe_break_history.loc[breaking_node_name, 'Pipe_A']
- pipe_B = self._pipe_break_history.loc[breaking_node_name, 'Pipe_B']
+ def getBreakData(self, breaking_node_name): # noqa: N802, D102
+ pipe_A = self._pipe_break_history.loc[breaking_node_name, 'Pipe_A'] # noqa: N806
+ pipe_B = self._pipe_break_history.loc[breaking_node_name, 'Pipe_B'] # noqa: N806
orginal_pipe = self._pipe_break_history.loc[
breaking_node_name, 'Orginal_pipe'
]
- node_A = self._pipe_break_history.loc[breaking_node_name, 'Node_A']
- node_B = self._pipe_break_history.loc[breaking_node_name, 'Node_B']
+ node_A = self._pipe_break_history.loc[breaking_node_name, 'Node_A'] # noqa: N806
+ node_B = self._pipe_break_history.loc[breaking_node_name, 'Node_B'] # noqa: N806
return pipe_A, pipe_B, orginal_pipe, node_A, node_B
- def getCertainBreakData(self, damage_node_name, wn):
+ def getCertainBreakData(self, damage_node_name, wn): # noqa: C901, N802, D102
pipe_name_list = []
- result_pipe_A = None
- result_pipe_B = None
+ result_pipe_A = None # noqa: N806
+ result_pipe_B = None # noqa: N806
- node_A = self._pipe_break_history.loc[damage_node_name, 'Node_A']
- node_B = self._pipe_break_history.loc[damage_node_name, 'Node_B']
+ node_A = self._pipe_break_history.loc[damage_node_name, 'Node_A'] # noqa: N806
+ node_B = self._pipe_break_history.loc[damage_node_name, 'Node_B'] # noqa: N806
orginal_pipe = self._pipe_break_history.loc[damage_node_name, 'Orginal_pipe']
@@ -800,9 +800,9 @@ def getCertainBreakData(self, damage_node_name, wn):
self._pipe_leak_history['Orginal_pipe'] == orginal_pipe
]
- for damage_point_name, data in refined_data.iterrows():
- pipe_A = data['Pipe_A']
- pipe_B = data['Pipe_B']
+ for damage_point_name, data in refined_data.iterrows(): # noqa: B007
+ pipe_A = data['Pipe_A'] # noqa: N806
+ pipe_B = data['Pipe_B'] # noqa: N806
if pipe_A not in pipe_name_list:
pipe_name_list.append(pipe_A)
@@ -814,9 +814,9 @@ def getCertainBreakData(self, damage_node_name, wn):
self._pipe_break_history['Orginal_pipe'] == orginal_pipe
]
- for damage_point_name, data in refined_data.iterrows():
- pipe_A = data['Pipe_A']
- pipe_B = data['Pipe_B']
+ for damage_point_name, data in refined_data.iterrows(): # noqa: B007
+ pipe_A = data['Pipe_A'] # noqa: N806
+ pipe_B = data['Pipe_B'] # noqa: N806
if pipe_A not in pipe_name_list:
pipe_name_list.append(pipe_A)
@@ -826,30 +826,30 @@ def getCertainBreakData(self, damage_node_name, wn):
for pipe_name in pipe_name_list:
try:
pipe = wn.get_link(pipe_name)
- except:
+ except: # noqa: S112, E722
continue
if node_B == pipe.start_node_name:
- result_pipe_B = pipe_name
+ result_pipe_B = pipe_name # noqa: N806
elif node_A == pipe.end_node_name:
- result_pipe_A = pipe_name
+ result_pipe_A = pipe_name # noqa: N806
- if result_pipe_A != None and result_pipe_B != None:
+ if result_pipe_A != None and result_pipe_B != None: # noqa: E711
return result_pipe_A, result_pipe_B, node_A, node_B
raise RuntimeError(
'There must be a pair of pipes for ' + repr(damage_node_name)
)
- def getPipeDamageAttribute(self, attribute_name, damage_node_name=None):
+ def getPipeDamageAttribute(self, attribute_name, damage_node_name=None): # noqa: N802, D102
if attribute_name not in self._pipe_damage_table.columns:
raise ValueError('Attribute not in damage table: ' + str(attribute_name))
- if damage_node_name == None:
+ if damage_node_name == None: # noqa: E711
return self._pipe_damage_table[attribute_name]
- else:
+ else: # noqa: RET505
return self._pipe_damage_table.loc[damage_node_name, attribute_name]
- def getDamageData(self, element_type, iCopy=True):
+ def getDamageData(self, element_type, iCopy=True): # noqa: FBT002, C901, N802, N803, D102
if element_type.upper() == 'PIPE':
if iCopy:
res = self._pipe_damage_table.copy()
@@ -889,17 +889,17 @@ def getDamageData(self, element_type, iCopy=True):
raise ValueError('Unknown element type: ' + element_type)
return res
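
FBT002 flags boolean defaults on positional parameters (iCopy=True here,
reset=False and the various iCheck=False elsewhere): a caller can then pass a
bare True positionally, recreating the FBT003 problem at every call site. The
usual remedy is to make such flags keyword-only; a hypothetical variant:

    def getDamageData(self, element_type, *, iCopy=True):  # noqa: N802, N803
        ...

    # callers must then name the flag:
    damage_table = registry.getDamageData('PIPE', iCopy=False)
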
- def getOrginalElement(self, damaged_node_name, element_type):
+ def getOrginalElement(self, damaged_node_name, element_type): # noqa: N802, D102
element_damage_data = self.getDamageData(element_type, iCopy=False)
return element_damage_data.loc[damaged_node_name, 'Orginal_element']
- def getPipeData(self, attr, name=None):
- if name != None:
+ def getPipeData(self, attr, name=None): # noqa: N802, D102
+ if name != None: # noqa: E711
return self._pipe_data[attr].loc[name]
- else:
+ else: # noqa: RET505
return self._pipe_data[attr]
- def setDamageData(self, element, col, value):
+ def setDamageData(self, element, col, value): # noqa: N802, D102
if element.upper() == 'PIPE':
if col not in self._pipe_damage_table.columns:
raise ValueError('Columns is not in damage table: ' + col)
@@ -919,22 +919,22 @@ def setDamageData(self, element, col, value):
else:
raise ValueError('Element is not defined: ' + element)
- def setDamageDataByRowAndColumn(self, element, index, col, value, iCheck=False):
+ def setDamageDataByRowAndColumn(self, element, index, col, value, iCheck=False): # noqa: FBT002, N802, N803, D102
# if element.upper() == 'PIPE':
damage_table = self.getDamageData(element, iCopy=False)
if col not in damage_table.columns:
raise ValueError('Columns is not in damage table: ' + col)
- if type(index) == list or (
+ if type(index) == list or ( # noqa: E721
(index in damage_table.index and col in damage_table.columns)
- or iCheck == True
+ or iCheck == True # noqa: E712
):
damage_table.loc[index, col] = value
else:
raise ValueError(index)
- def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
- if type(index_list) != list:
- raise ValueError('index_list is not data type list')
+ def setDamageDataByList(self, element, index_list, col, value, iCheck=False): # noqa: FBT002, C901, N802, N803, D102
+ if type(index_list) != list: # noqa: E721
+ raise ValueError('index_list is not data type list') # noqa: EM101, TRY003
if element.upper() == 'PIPE':
if col not in self._pipe_damage_table.columns:
@@ -943,7 +943,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
for damage_node_name in index_list:
if (
damage_node_name in self._pipe_damage_table.index
- or iCheck == True
+ or iCheck == True # noqa: E712
):
self._pipe_damage_table.loc[damage_node_name, col] = value
else:
@@ -956,7 +956,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
for damage_node_name in index_list:
if (
damage_node_name in self._node_damage_table.index
- or iCheck == True
+ or iCheck == True # noqa: E712
):
self._node_damage_table.loc[damage_node_name, col] = value
else:
@@ -967,7 +967,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
raise ValueError('Columns is not in damage table: ' + col)
for gnode_name in index_list:
- if gnode_name in self._gnode_damage_table.index or iCheck == True:
+ if gnode_name in self._gnode_damage_table.index or iCheck == True: # noqa: E712
self._gnode_damage_table.loc[gnode_name, col] = value
else:
raise ValueError(gnode_name)
@@ -979,7 +979,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
for _tank_damage_table in index_list:
if (
_tank_damage_table in self._tank_damage_table.index
- or iCheck == True
+ or iCheck == True # noqa: E712
):
self._tank_damage_table.loc[_tank_damage_table, col] = value
else:
@@ -992,7 +992,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
for _pump_damage_table in index_list:
if (
_pump_damage_table in self._pump_damage_table.index
- or iCheck == True
+ or iCheck == True # noqa: E712
):
self._pump_damage_table.loc[_pump_damage_table, col] = value
else:
@@ -1005,7 +1005,7 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
for _reservoir_damage_table in index_list:
if (
_reservoir_damage_table in self._reservoir_damage_table.index
- or iCheck == True
+ or iCheck == True # noqa: E712
):
self._reservoir_damage_table.loc[
_reservoir_damage_table, col
@@ -1015,21 +1015,21 @@ def setDamageDataByList(self, element, index_list, col, value, iCheck=False):
else:
raise ValueError('Element is not defined: ' + element)
- def updatePipeDamageTableTimeSeries(self, time):
+ def updatePipeDamageTableTimeSeries(self, time): # noqa: N802, D102
if time in self._pipe_damage_table_time_series:
- raise ValueError('Time exist in pipe damage table time history')
+            raise ValueError('Time exists in pipe damage table time history')  # noqa: EM101, TRY003
self._pipe_damage_table_time_series[time] = self._pipe_damage_table.copy()
- def updateNodeDamageTableTimeSeries(self, time):
+ def updateNodeDamageTableTimeSeries(self, time): # noqa: N802, D102
if time in self._node_damage_table_time_series:
- raise ValueError('Time exist in node damage table time history')
+            raise ValueError('Time exists in node damage table time history')  # noqa: EM101, TRY003
self._node_damage_table_time_series[time] = self._node_damage_table.copy()
- def updateTankTimeSeries(self, wn, time):
+ def updateTankTimeSeries(self, wn, time): # noqa: N802, D102
if time in self._tank_level_time_series:
- raise ValueError('Time exist in tank damage table time history')
+            raise ValueError('Time exists in tank damage table time history')  # noqa: EM101, TRY003
tank_name_list = wn.tank_name_list
tank_level_res = pd.Series(index=tank_name_list)
@@ -1037,7 +1037,7 @@ def updateTankTimeSeries(self, wn, time):
for tank_name in wn.tank_name_list:
node = wn.get_node(tank_name)
net_water_level = node.level - node.min_level
- if net_water_level < 0.001:
+ if net_water_level < 0.001: # noqa: PLR2004
raise ValueError(
'Net Water Level in tank cannot be less than zero:'
+ repr(tank_name)
@@ -1048,10 +1048,10 @@ def updateTankTimeSeries(self, wn, time):
self._tank_level_time_series[time] = tank_level_res
- def updateRestorationIncomeWaterTimeSeries(self, wn, time):
+ def updateRestorationIncomeWaterTimeSeries(self, wn, time): # noqa: ARG002, N802, D102
if time in self._restoration_reservoir_name_time_series:
- raise ValueError(
- 'Time exist in restoration reservoir damage table time history'
+ raise ValueError( # noqa: TRY003
+                'Time exists in restoration reservoir damage table time history'  # noqa: EM101
)
res = []
for list_of_restoration in self._record_registry:
@@ -1061,16 +1061,16 @@ def updateRestorationIncomeWaterTimeSeries(self, wn, time):
self._restoration_reservoir_name_time_series[time] = res
- def updateElementDamageTable(self, element, attr, index, value, icheck=False):
+ def updateElementDamageTable(self, element, attr, index, value, icheck=False): # noqa: FBT002, C901, N802, D102
if element == 'PIPE':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._pipe_damage_table[attr].loc[index] == value:
- raise ValueError('the value is already set')
+ raise ValueError('the value is already set') # noqa: EM101, TRY003
self._pipe_damage_table.loc[index, attr] = value
elif element == 'DISTNODE':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._node_damage_table[attr].loc[index] == value:
raise ValueError(
'the value is already set in element: '
@@ -1086,7 +1086,7 @@ def updateElementDamageTable(self, element, attr, index, value, icheck=False):
self._node_damage_table.loc[index, attr] = value
elif element == 'GNODE':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._gnode_damage_table[attr].loc[index] == value:
raise ValueError(
'the value is already set in element: '
@@ -1102,7 +1102,7 @@ def updateElementDamageTable(self, element, attr, index, value, icheck=False):
self._gnode_damage_table.loc[index, attr] = value
elif element == 'TANK':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._tank_damage_table[attr].loc[index] == value:
raise ValueError(
'the value is already set in element: '
@@ -1118,7 +1118,7 @@ def updateElementDamageTable(self, element, attr, index, value, icheck=False):
self._tank_damage_table.loc[index, attr] = value
elif element == 'PUMP':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._pump_damage_table[attr].loc[index] == value:
raise ValueError(
'the value is already set in element: '
@@ -1134,7 +1134,7 @@ def updateElementDamageTable(self, element, attr, index, value, icheck=False):
self._pump_damage_table.loc[index, attr] = value
elif element == 'RESERVOIR':
- if icheck == True:
+ if icheck == True: # noqa: E712
if self._reservoir_damage_table[attr].loc[index] == value:
raise ValueError(
'the value is already set in element: '
@@ -1152,7 +1152,7 @@ def updateElementDamageTable(self, element, attr, index, value, icheck=False):
else:
raise ValueError('Unknown element: ' + element)
- def addAttrToElementDamageTable(self, element, attr, def_data):
+ def addAttrToElementDamageTable(self, element, attr, def_data): # noqa: N802, D102
if element == 'PIPE':
self.addAttrToPipeDamageTable(attr, def_data)
elif element == 'DISTNODE':
@@ -1169,61 +1169,61 @@ def addAttrToElementDamageTable(self, element, attr, def_data):
else:
raise ValueError('Undefined element: ' + element)
- def addAttrToPipeDamageTable(self, attr, def_data):
+ def addAttrToPipeDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._pipe_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._pipe_damage_table[attr] = np.nan
else:
self._pipe_damage_table[attr] = def_data
- def addAttrToDistNodeDamageTable(self, attr, def_data):
+ def addAttrToDistNodeDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._node_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._node_damage_table[attr] = np.nan
else:
self._node_damage_table[attr] = def_data
- def addAttrToGeneralNodeDamageTable(self, attr, def_data):
+ def addAttrToGeneralNodeDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._gnode_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._gnode_damage_table[attr] = np.nan
else:
self._gnode_damage_table[attr] = def_data
- def addAttrToTankDamageTable(self, attr, def_data):
+ def addAttrToTankDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._tank_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._tank_damage_table[attr] = np.nan
else:
self._tank_damage_table[attr] = def_data
- def addAttrToPumpDamageTable(self, attr, def_data):
+ def addAttrToPumpDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._pump_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._pump_damage_table[attr] = np.nan
else:
self._pump_damage_table[attr] = def_data
- def addAttrToReservoirDamageTable(self, attr, def_data):
+ def addAttrToReservoirDamageTable(self, attr, def_data): # noqa: N802, D102
if attr in self._reservoir_damage_table.columns:
- raise ValueError('attribute already in the damage table')
+ raise ValueError('attribute already in the damage table') # noqa: EM101, TRY003
- if def_data == None:
+ if def_data == None: # noqa: E711
self._reservoir_damage_table[attr] = np.nan
else:
self._reservoir_damage_table[attr] = def_data
- def iOccupied(self, node_name):
+ def iOccupied(self, node_name): # noqa: N802
"""Checks if the node is occuoied
Parameters
@@ -1236,10 +1236,10 @@ def iOccupied(self, node_name):
bool
result.
- """
+ """ # noqa: D400, D401
return node_name in self._occupancy.index
- def _getDamagedPipesRegistry(self):
+ def _getDamagedPipesRegistry(self): # noqa: N802
"""Gets the whole damage registry. Not safe to be used outside the class.
Returns
@@ -1247,10 +1247,10 @@ def _getDamagedPipesRegistry(self):
Pandas.Series
damage locations by node name.
- """
+ """ # noqa: D401
return self._pipe_node_damage_status
- def getNumberofDamagedNodes(self):
+ def getNumberofDamagedNodes(self): # noqa: N802
"""Gets numbers of Damaged locations. Counts two for broken pipes
Returns
@@ -1258,10 +1258,10 @@ def getNumberofDamagedNodes(self):
Int
Number of damaged locations by node name.
- """
+ """ # noqa: D400, D401
return len(self._pipe_node_damage_status)
- def occupyNode(self, node_name, occupier_name):
+ def occupyNode(self, node_name, occupier_name): # noqa: N802
"""Put adds node and its occupier in occupency list
Parameters
@@ -1280,17 +1280,17 @@ def occupyNode(self, node_name, occupier_name):
-------
None.
- """
+ """ # noqa: D400
if occupier_name in self._occupancy:
# if not iNodeCoupled(node_name):
- raise ValueError(
- 'Occupier name already in the list. Forget to remove another occupancy or double adding?'
+ raise ValueError( # noqa: TRY003
+                'Occupier name is already in the list. Forgot to remove a previous occupancy, or added it twice?'  # noqa: EM101
)
self._occupancy = self._occupancy.append(
pd.Series(data=occupier_name, index=[node_name])
)
- def removeOccupancy(self, occupier_name):
+ def removeOccupancy(self, occupier_name): # noqa: N802
"""Removes occupency in the node by occupier's name.
Parameters
@@ -1307,16 +1307,16 @@ def removeOccupancy(self, occupier_name):
-------
None.
- """
+ """ # noqa: D401
temp = self._occupancy[self._occupancy == occupier_name]
if len(temp) == 0:
- raise ValueError('there is no node occupied with this occupier name')
+            raise ValueError('there is no node occupied by this occupier name')  # noqa: EM101, TRY003
ind = temp.index.tolist()
self._occupancy = self._occupancy.drop(ind)
- def whoOccupiesIn(self, node_name):
+ def whoOccupiesIn(self, node_name): # noqa: N802
"""Gets name of the occupier
Parameters
@@ -1329,10 +1329,10 @@ def whoOccupiesIn(self, node_name):
string
Occupier's name.
- """
+ """ # noqa: D400, D401
return self._occupancy[node_name]
- def whereIsOccupiedByName(self, occupier_name):
+ def whereIsOccupiedByName(self, occupier_name): # noqa: N802
"""Gets node(s) occupied by occupier
Parameters
@@ -1350,12 +1350,12 @@ def whereIsOccupiedByName(self, occupier_name):
str or series
node(s) ID.
- """
+ """ # noqa: D400, D401
temp = self._occupancy[self._occupancy == occupier_name]
if len(temp) == 0:
- raise ValueError('there is no occupancy with this name')
+ raise ValueError('there is no occupancy with this name') # noqa: EM101, TRY003
- def getListofFreeRepairAgents(self):
+ def getListofFreeRepairAgents(self): # noqa: N802
"""MAYBE NOT NEEDED Gets a list of free agents. Not needed anymore.
Returns
@@ -1364,14 +1364,14 @@ def getListofFreeRepairAgents(self):
DESCRIPTION.
"""
- working_RepairAgents = set(self._occupancy.tolist())
- RepairAgentsNameList = self._pipe_RepairAgentNameRegistry
- Free_RepairAgents = [
+ working_RepairAgents = set(self._occupancy.tolist()) # noqa: N806
+ RepairAgentsNameList = self._pipe_RepairAgentNameRegistry # noqa: N806
+ Free_RepairAgents = [ # noqa: N806
name for name in RepairAgentsNameList if name not in working_RepairAgents
]
- return Free_RepairAgents
+ return Free_RepairAgents # noqa: RET504
- def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name):
+ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name): # noqa: N802
"""Couples two nodes in registry for the time which we have a break.
PLEASE NOTE THAT THE FIRST NODE MUST BE THE ONE CONNECTED TO THE
MAIN(ORIGINAL) PIPE THAT IS BROKEN NOW.
@@ -1387,12 +1387,12 @@ def coupleTwoBreakNodes(self, break_point_1_name, break_point_2_name):
-------
None.
- """
+ """ # noqa: D205
self._pipe_break_node_coupling[break_point_1_name] = break_point_2_name
self._pipe_break_node_coupling[break_point_2_name] = break_point_1_name
self._break_point_attached_to_mainPipe.append(break_point_1_name)
- def getCoupledBreakNode(self, break_point_name):
+ def getCoupledBreakNode(self, break_point_name): # noqa: N802
"""Gets the coupled node given the first coupled node, and checks if the
given coupled node is connected to the main pipe.
@@ -1409,17 +1409,17 @@ def getCoupledBreakNode(self, break_point_name):
If the given (first node) is the one connected to the main(original)
pipe
- """
+ """ # noqa: D205, D401
out1 = self._pipe_break_node_coupling[break_point_name]
- is_breakPoint_1_attacjedToMainPipe = (
+ is_breakPoint_1_attacjedToMainPipe = ( # noqa: N806
break_point_name in self._break_point_attached_to_mainPipe
)
return out1, is_breakPoint_1_attacjedToMainPipe
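
The two coupling methods above maintain a symmetric two-way mapping between break points, plus a list recording which point of each pair sits on the main (original) pipe. A minimal, self-contained sketch of that bookkeeping, with illustrative names standing in for the registry's private attributes:

# Sketch of the break-node coupling bookkeeping (names illustrative).
coupling = {}            # stands in for _pipe_break_node_coupling
attached_to_main = []    # stands in for _break_point_attached_to_mainPipe

def couple_two_break_nodes(point_1, point_2):
    # point_1 must be the node connected to the main (original) pipe.
    coupling[point_1] = point_2
    coupling[point_2] = point_1
    attached_to_main.append(point_1)

def get_coupled_break_node(point):
    # Returns the partner node and whether `point` sits on the main pipe.
    return coupling[point], point in attached_to_main
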
- def iNodeCoupled(self, node_name):
+ def iNodeCoupled(self, node_name): # noqa: N802, D102
return node_name in self._pipe_break_node_coupling
- def iDamagedPipeReminded(self):
+ def iDamagedPipeReminded(self): # noqa: N802, D102
damaged_nodes = self._pipe_node_damage_status.index
if len(damaged_nodes) == 0:
return False
@@ -1427,13 +1427,13 @@ def iDamagedPipeReminded(self):
for node_name in iter(damaged_nodes):
if node_name not in self._occupancy.index:
is_reminded = True
- return is_reminded
+ return is_reminded # noqa: RET504
return is_reminded
- def getOtherCoupledBreakPoint(self, node_name):
+ def getOtherCoupledBreakPoint(self, node_name): # noqa: N802, D102
return self._pipe_break_node_coupling[node_name]
- def removeCoupledBreakNodes(self, break_point_name):
+ def removeCoupledBreakNodes(self, break_point_name): # noqa: N802
"""Removes the coupled
Parameters
@@ -1448,7 +1448,7 @@ def removeCoupledBreakNodes(self, break_point_name):
second : str
Name of second node(connected to the pipe created after break)
- """
+ """ # noqa: D400, D401
other_coupled_break_point = self._pipe_break_node_coupling.pop(
break_point_name
)
@@ -1465,23 +1465,23 @@ def removeCoupledBreakNodes(self, break_point_name):
second = break_point_name
return first, second
- def recordPipeDamageTable(self, stop_time):
+ def recordPipeDamageTable(self, stop_time): # noqa: N802, D102
if self.settings['result_details'] == 'minimal':
return None
if stop_time in self._pipe_damage_table_history:
            return ValueError('Time exists in pipe damage history: ' + str(stop_time))
- self._pipe_damage_table_history['stop_time'] = (
+ self._pipe_damage_table_history['stop_time'] = ( # noqa: RET503
self._pipe_damage_table_history
)
- def getMostLeakAtCheck(self, real_node_name_list, element_type):
+ def getMostLeakAtCheck(self, real_node_name_list, element_type): # noqa: N802, D102
if element_type == 'DISTNODE':
total_demand = self._node_damage_table.loc[
real_node_name_list, 'Demand2'
]
total_demand.loc[total_demand[total_demand.isna()].index] = 0
return total_demand
- elif element_type == 'PIPE':
+ elif element_type == 'PIPE': # noqa: RET505
leak = self._pipe_damage_table.loc[real_node_name_list, 'LeakAtCheck']
leak.loc[leak[leak.isna()].index] = 0
return leak
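
A note on the E711/E712 suppressions that recur throughout this registry: ruff would normally rewrite `== None` and `== True` into identity and truthiness tests. Suppressing is the conservative choice when operands may be pandas objects, where `==` broadcasts elementwise; for plain Python values the preferred forms look like the sketch below, which is illustrative rather than the module's code:

def add_attr(table: dict, attr: str, def_data=None):
    if def_data is None:        # E711-compliant form of `def_data == None`
        table[attr] = float('nan')
    else:
        table[attr] = def_data

def should_update(icheck: bool) -> bool:
    return bool(icheck)         # E712-compliant form of `icheck == True`
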
diff --git a/modules/systemPerformance/REWET/REWET/restoration/restorationlog.py b/modules/systemPerformance/REWET/REWET/restoration/restorationlog.py
index c7caede7a..5098acb3a 100644
--- a/modules/systemPerformance/REWET/REWET/restoration/restorationlog.py
+++ b/modules/systemPerformance/REWET/REWET/restoration/restorationlog.py
@@ -1,14 +1,14 @@
"""Created on Sun Jan 31 21:54:19 2021
@author: snaeimi
-"""
+""" # noqa: INP001, D400
from collections import OrderedDict
import pandas as pd
-class RestorationLog:
+class RestorationLog: # noqa: D101
def __init__(self, settings):
self.settings = settings
self._agent_state_log_book = pd.DataFrame(
@@ -41,19 +41,19 @@ def __init__(self, settings):
)
self.crew_history = OrderedDict()
- def updateAgentHistory(self, agent_table, time):
- if self.settings['record_restoration_agent_logs'] == False:
+ def updateAgentHistory(self, agent_table, time): # noqa: N802, D102
+ if self.settings['record_restoration_agent_logs'] == False: # noqa: E712
return
self.crew_history[time] = agent_table.copy()
- def updateAgentLogBook(self, agent_table, time):
- if self.settings['record_restoration_agent_logs'] == False:
+ def updateAgentLogBook(self, agent_table, time): # noqa: N802, D102
+ if self.settings['record_restoration_agent_logs'] == False: # noqa: E712
return
- for agent_name, line in agent_table.iterrows():
+ for agent_name, line in agent_table.iterrows(): # noqa: B007
temp = None
- if line['active'] == True and line['ready'] == False:
+ if line['active'] == True and line['ready'] == False: # noqa: E712
data = line['data']
_x = data.current_location.coord.x
_y = data.current_location.coord.y
@@ -61,8 +61,8 @@ def updateAgentLogBook(self, agent_table, time):
_type = data.agent_type
_lable = data.cur_job_entity
_action = data.cur_job_action
- _EFN = data.cur_job_effect_definition_name
- _MN = data.cur_job_method_name
+ _EFN = data.cur_job_effect_definition_name # noqa: N806
+ _MN = data.cur_job_method_name # noqa: N806
_loc = data.cur_job_location
temp = pd.Series(
@@ -97,7 +97,7 @@ def updateAgentLogBook(self, agent_table, time):
temp, ignore_index=True
)
- def addAgentActionToLogBook(
+ def addAgentActionToLogBook( # noqa: N802, D102
self,
agent_name,
node_name,
@@ -108,9 +108,9 @@ def addAgentActionToLogBook(
travel_time,
effect_definition_name,
method_name,
- iFinished=True,
+ iFinished=True, # noqa: FBT002, N803
):
- if self.settings['record_restoration_agent_logs'] == False:
+ if self.settings['record_restoration_agent_logs'] == False: # noqa: E712
return
temp = pd.Series(
@@ -143,21 +143,21 @@ def addAgentActionToLogBook(
temp, ignore_index=True
)
- def addEndTimegentActionToLogBook(self, agent_name, time, modified_end_time):
- if self.settings['record_restoration_agent_logs'] == False:
+ def addEndTimegentActionToLogBook(self, agent_name, time, modified_end_time): # noqa: N802, D102
+ if self.settings['record_restoration_agent_logs'] == False: # noqa: E712
return
temp = self._agent_action_log_book[['Agent', 'Time']] == [agent_name, time]
temp = self._agent_action_log_book[temp.all(1)]
if len(temp) > 1:
- raise ValueError(
- 'There are too many agents record with the same time and name'
+ raise ValueError( # noqa: TRY003
+                'There are too many agent records with the same time and name'  # noqa: EM101
)
- elif len(temp) == 0:
- raise ValueError(
- 'There is not agent agent record with this time and name'
+ elif len(temp) == 0: # noqa: RET506
+ raise ValueError( # noqa: TRY003
+                'There is no agent record with this time and name'  # noqa: EM101
)
ind = temp.index
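
The lookup in addEndTimegentActionToLogBook selects the single log-book row whose Agent and Time columns both match before amending it. A self-contained sketch of that pandas pattern, with illustrative data:

import pandas as pd

log_book = pd.DataFrame(
    {'Agent': ['a1', 'a2'], 'Time': [10, 20], 'End_time': [15, 25]}
)

# Boolean mask: rows where both Agent and Time match the query pair.
mask = (log_book[['Agent', 'Time']] == ['a1', 10]).all(axis=1)
matches = log_book[mask]
if len(matches) > 1:
    raise ValueError('more than one record with the same time and name')
if len(matches) == 0:
    raise ValueError('no agent record with this time and name')
log_book.loc[matches.index, 'End_time'] = 18  # amend the unique row
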
diff --git a/modules/systemPerformance/REWET/REWET/timeline.py b/modules/systemPerformance/REWET/REWET/timeline.py
index 481643af7..4e6ff0923 100644
--- a/modules/systemPerformance/REWET/REWET/timeline.py
+++ b/modules/systemPerformance/REWET/REWET/timeline.py
@@ -1,11 +1,11 @@
"""Created on Sat Dec 26 02:00:40 2020
@author: snaeimi
-"""
+""" # noqa: D400
import logging
-import numpy
+import numpy # noqa: ICN001
import pandas as pd
logger = logging.getLogger(__name__)
@@ -13,7 +13,7 @@
EVENT_TYPE = ['dmg', 'rpr', 'rst'] # event types are defined here
-class Timeline:
+class Timeline: # noqa: D101
# =============================================================================
    # This class has many functions that can raise a lot of exceptions.
    # We need to modify their code so that their usage is safe and bug-free.
@@ -21,7 +21,7 @@ class Timeline:
def __init__(self, simulation_end_time, restoration, registry):
if simulation_end_time < 0:
- raise ValueError('simulation end time must be zero or bigger than zero')
+            raise ValueError('simulation end time must be zero or greater')  # noqa: EM101, TRY003
self._current_time = 0
self._event_time_register = pd.DataFrame(
dtype='bool'
@@ -42,9 +42,9 @@ def __init__(self, simulation_end_time, restoration, registry):
self._current_time_indexOfIndex = 0
self.registry = registry
- def iContinue(self):
+ def iContinue(self): # noqa: N802, D102
if (
- self._current_time == 0 and self._iFirst_time_zero == True
+ self._current_time == 0 and self._iFirst_time_zero == True # noqa: E712
): # So that the other condition happens
self._iFirst_time_zero = False
@@ -54,10 +54,10 @@ def iContinue(self):
if abs(self._simulation_end_time - self._current_time) <= abs(
self._ending_Event_ignore_time
):
- print('End_Time_Reached')
+ print('End_Time_Reached') # noqa: T201
return False
- simulation_minimum_time = self.restoration._registry.settings[
+ simulation_minimum_time = self.restoration._registry.settings[ # noqa: SLF001
'minimum_simulation_time'
]
minimum_simulation_time_satisfied = (
@@ -70,20 +70,20 @@ def iContinue(self):
'node_demand_temination'
]
- if minimum_simulation_time_satisfied == True:
- if consider_last_sequence_termination == True:
+ if minimum_simulation_time_satisfied == True: # noqa: E712
+ if consider_last_sequence_termination == True: # noqa: E712
if self.restoration.iRestorationStopTime():
- print('Last_sequence_termination')
+ print('Last_sequence_termination') # noqa: T201
return False
- if consider_node_demand_temination == True:
+ if consider_node_demand_temination == True: # noqa: E712
if self.iFunctionalityRequirementReached():
- print('FunctionalityRequirementReached')
+ print('FunctionalityRequirementReached') # noqa: T201
return False
return True
- def getNextTime(self):
+ def getNextTime(self): # noqa: N802, D102
if (
not self._event_time_register.index.is_monotonic_increasing
): # for just in case if the index of event time register is not sorted
@@ -93,29 +93,29 @@ def getNextTime(self):
self._event_time_register.index[self._current_time_indexOfIndex]
!= self._current_time
):
- raise RuntimeError(
- 'A possible violation of time in timeline event variables and/or event time registry'
+ raise RuntimeError( # noqa: TRY003
+ 'A possible violation of time in timeline event variables and/or event time registry' # noqa: EM101
)
next_time = self._event_time_register.index[
self._current_time_indexOfIndex + 1
]
- return next_time
+ return next_time # noqa: RET504
- def getCurrentStopTime(self):
+ def getCurrentStopTime(self): # noqa: N802, D102
return int(self._current_time)
- def iCurrentTimeRepairEvent(self):
+ def iCurrentTimeRepairEvent(self): # noqa: N802, D102
return self._event_time_register['rpr'].loc[self._current_time]
- def iCurenttimeRestorationEvent(self):
- print('current_time is= ' + str(self._current_time))
- print(self._event_time_register['rst'].loc[self._current_time])
+ def iCurenttimeRestorationEvent(self): # noqa: N802, D102
+ print('current_time is= ' + str(self._current_time)) # noqa: T201
+ print(self._event_time_register['rst'].loc[self._current_time]) # noqa: T201
return self._event_time_register['rst'].loc[self._current_time]
- def iCurrentTimeDamageEvent(self):
+ def iCurrentTimeDamageEvent(self): # noqa: N802, D102
return self._event_time_register['dmg'].loc[self._current_time]
- def addEventTime(self, event_distinct_time, event_type='dmg'):
+ def addEventTime(self, event_distinct_time, event_type='dmg'): # noqa: N802
"""This function is a low-level function to add event type in an already-
existing event_time in event_time_register. FOR NOW THE DISTINCT TIMES
CAN BE A LIST OR A LIST. MAYBE IN THE FUTURE WE CAN DECIDE WEATHER IT
@@ -143,41 +143,41 @@ def addEventTime(self, event_distinct_time, event_type='dmg'):
-------
None.
- """
- if type(event_distinct_time) != pd.core.series.Series:
+ """ # noqa: D205, D401, D404
+ if type(event_distinct_time) != pd.core.series.Series: # noqa: E721
if (
- type(event_distinct_time) == numpy.float64
- or type(event_distinct_time) == int
- or type(event_distinct_time) == float
- or type(event_distinct_time) == list
+ type(event_distinct_time) == numpy.float64 # noqa: E721
+ or type(event_distinct_time) == int # noqa: E721
+ or type(event_distinct_time) == float # noqa: E721
+ or type(event_distinct_time) == list # noqa: E721
):
event_distinct_time = pd.Series(
data=event_distinct_time, dtype='int64'
)
else:
- print(type(event_distinct_time))
- raise ValueError('event_distinct_time must be pandas.Series type')
+ print(type(event_distinct_time)) # noqa: T201
+                raise ValueError('event_distinct_time must be a pandas.Series')  # noqa: EM101, TRY003
if event_type not in EVENT_TYPE:
- raise ValueError('unrecognized value for event_type')
+ raise ValueError('unrecognized value for event_type') # noqa: EM101, TRY003
        # check for duplicates in the time index; if a duplicate exists, we only update the True/False values in the DataFrame
temp_to_pop = []
- logger.debug('event distinct time ' + repr(event_distinct_time))
+ logger.debug('event distinct time ' + repr(event_distinct_time)) # noqa: G003
- for i, i_time in event_distinct_time.items():
+ for i, i_time in event_distinct_time.items(): # noqa: B007, PERF102
if i_time in self._event_time_register.index:
self._event_time_register.loc[i_time, event_type] = True
self.checkAndAmendTime()
temp_to_pop.append(i_time)
- logger.debug('temp_to_pop' + repr(temp_to_pop))
+ logger.debug('temp_to_pop' + repr(temp_to_pop)) # noqa: G003
for i_time in temp_to_pop:
ind = event_distinct_time[event_distinct_time == i_time].index[0]
event_distinct_time.pop(ind)
if len(event_distinct_time) != 0:
- for i, i_time in event_distinct_time.items():
+ for i, i_time in event_distinct_time.items(): # noqa: PERF102
self._event_time_register.loc[i_time, EVENT_TYPE] = [
False for i in range(len(EVENT_TYPE))
]
@@ -185,7 +185,7 @@ def addEventTime(self, event_distinct_time, event_type='dmg'):
self._event_time_register = self._event_time_register.sort_index()
self.checkAndAmendTime()
- def iEventTypeAt(self, begin_time, event_type):
+ def iEventTypeAt(self, begin_time, event_type): # noqa: N802
"""Checks if an event type is in event registry at the time of begin_time
----------
begin_time : int
@@ -198,15 +198,15 @@ def iEventTypeAt(self, begin_time, event_type):
bool
            Result: whether such data exists or not.
- """
+ """ # noqa: D205, D400, D401
if begin_time not in self._event_time_register.index:
return False
- if self._event_time_register[event_type].loc[begin_time]:
+ if self._event_time_register[event_type].loc[begin_time]: # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
- def checkAndAmendTime(self):
+ def checkAndAmendTime(self): # noqa: N802
"""Checks if the time of event is higher than the sim time.Also checks
if the the ending event has any thing event(nothings must be true).
@@ -218,13 +218,13 @@ def checkAndAmendTime(self):
-------
None.
- """
+ """ # noqa: D205, D401
first_length = len(self._event_time_register.index)
self._event_time_register = self._event_time_register[
self._event_time_register.index <= self._simulation_end_time
]
if first_length > len(self._event_time_register):
- print(
+ print( # noqa: T201
                'there were '
                + repr(first_length - len(self._event_time_register))
                + ' events amended'
@@ -237,7 +237,7 @@ def checkAndAmendTime(self):
# if self._event_time_register[self._event_time_register.index==self._simulation_end_time].empty==True:
# self._event_time_register=self._event_time_register.append(pd.DataFrame(data = False , index = [self._simulation_end_time], columns = EVENT_TYPE))
- def iFunctionalityRequirementReached(self):
+ def iFunctionalityRequirementReached(self): # noqa: C901, N802, D102
logger.debug('Func: node functionality')
ratio_criteria = self.registry.settings.process[
'node_demand_termination_ratio'
@@ -246,11 +246,11 @@ def iFunctionalityRequirementReached(self):
'node_demand_termination_time'
]
stop_time = self.getCurrentStopTime()
- if self.registry.if_first_event_occured == False:
+ if self.registry.if_first_event_occured == False: # noqa: RET503, E712
return False
- elif self.registry.if_first_event_occured == True:
- if self.registry.result == None:
+ elif self.registry.if_first_event_occured == True: # noqa: RET505, E712
+ if self.registry.result == None: # noqa: E711
return False
# for checking if we have still any leak in the system, since we
@@ -287,7 +287,7 @@ def iFunctionalityRequirementReached(self):
if default_pattern is not None:
node_pattern_list = [
(node.name, node.demand_timeseries_list.pattern_list()[0])
- if node.demand_timeseries_list.pattern_list()[0] != None
+ if node.demand_timeseries_list.pattern_list()[0] != None # noqa: E711
else (node.name, default_pattern)
for node in demand_nodes_list
]
@@ -295,7 +295,7 @@ def iFunctionalityRequirementReached(self):
node_pattern_list = [
(node.name, node.demand_timeseries_list.pattern_list()[0])
for node in demand_nodes_list
- if node.demand_timeseries_list.pattern_list()[0] != None
+ if node.demand_timeseries_list.pattern_list()[0] != None # noqa: E711
]
base_demand_list = [node.base_demand for node in demand_nodes_list]
@@ -367,8 +367,8 @@ def iFunctionalityRequirementReached(self):
ratio = demand_met.mean() / pre_event_demand.mean()
mean_of_ratio_satisfied = (ratio >= ratio_criteria).sum() / len(ratio)
- logger.debug('ratio that is= ' + repr(mean_of_ratio_satisfied))
- if (ratio >= ratio_criteria).all():
+ logger.debug('ratio that is= ' + repr(mean_of_ratio_satisfied)) # noqa: G003
+ if (ratio >= ratio_criteria).all(): # noqa: SIM103
return True
- else:
+ else: # noqa: RET505
return False
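
The E721 suppressions in addEventTime keep `type(x) == ...` comparisons; the isinstance-based coercion ruff would suggest looks roughly like the sketch below. The function name is illustrative, not part of the module's API:

import numpy as np
import pandas as pd

def coerce_event_times(event_distinct_time):
    # Accept a Series as-is; coerce scalars and lists; reject anything else.
    if isinstance(event_distinct_time, pd.Series):
        return event_distinct_time
    if isinstance(event_distinct_time, (int, float, np.floating, list)):
        return pd.Series(data=event_distinct_time, dtype='int64')
    raise TypeError('event_distinct_time must be a pandas Series')
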
diff --git a/modules/systemPerformance/REWET/REWET_Wrapper.py b/modules/systemPerformance/REWET/REWET_Wrapper.py
index 5ae1f39a5..cbb99b361 100644
--- a/modules/systemPerformance/REWET/REWET_Wrapper.py
+++ b/modules/systemPerformance/REWET/REWET_Wrapper.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
# Copyright (c) 2024 Leland Stanford Junior University
#
@@ -57,22 +57,22 @@
# except:
# This is only for now
# print("HERE")
-this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve()
+this_dir = Path(os.path.dirname(os.path.abspath(__file__))).resolve() # noqa: PTH100, PTH120
# main_dir = this_dir.parent
sys.path.insert(0, str(this_dir / 'REWET'))
-from initial import Starter
-from Result_Project import Project_Result
+from initial import Starter # noqa: E402
+from Result_Project import Project_Result # noqa: E402
-def createScnearioList(run_directory, scn_number):
- damage_input_dir = os.path.join(
+def createScnearioList(run_directory, scn_number): # noqa: N802, D103
+ damage_input_dir = os.path.join( # noqa: PTH118
run_directory, 'Results', 'WaterDistributionNetwork', 'damage_input'
)
- if not os.path.exists(damage_input_dir):
- os.makedirs(damage_input_dir)
+ if not os.path.exists(damage_input_dir): # noqa: PTH110
+ os.makedirs(damage_input_dir) # noqa: PTH103
# REWET_input_data["damage_input_dir"] = damage_input_dir
@@ -100,7 +100,7 @@ def createScnearioList(run_directory, scn_number):
return scenario_list, prefix
-def chooseARandomPreefix(damage_input_dir):
+def chooseARandomPreefix(damage_input_dir): # noqa: N802
"""Choses a random prefix for sceranio and pipe, node, pump and tank damage
file. The is important to find and unused prefix so if this script is being
ran in parallel, then files are not being overwritten.
@@ -115,7 +115,7 @@ def chooseARandomPreefix(damage_input_dir):
random_prefix : str
The Chosen random prefix string.
- """
+ """ # noqa: D205
number_of_prefix = 4
dir_list = os.listdir(damage_input_dir)
@@ -155,10 +155,10 @@ def chooseARandomPreefix(damage_input_dir):
-------
None.
- """
+ """ # noqa: RET503, W291
-def getDLFileName(run_dir, dl_file_path, scn_number):
+def getDLFileName(run_dir, dl_file_path, scn_number): # noqa: N802
"""If dl_file_path is not given, the path is acquired from rwhale input data.
Parameters
@@ -175,11 +175,11 @@ def getDLFileName(run_dir, dl_file_path, scn_number):
None.
"""
- if dl_file_path == None:
+ if dl_file_path == None: # noqa: E711
file_name = f'WaterDistributionNetwork_{scn_number}.json'
- run_dir = run_dir
- file_dir = os.path.join(run_dir, 'Results', 'WaterDistributionNetwork')
- file_path = os.path.join(file_dir, file_name)
+ run_dir = run_dir # noqa: PLW0127
+ file_dir = os.path.join(run_dir, 'Results', 'WaterDistributionNetwork') # noqa: PTH118
+ file_path = os.path.join(file_dir, file_name) # noqa: PTH118
else:
file_path = dl_file_path
file_dir = Path(dl_file_path).parent
@@ -187,7 +187,7 @@ def getDLFileName(run_dir, dl_file_path, scn_number):
return file_path, file_dir
-def setSettingsData(input_json, REWET_input_data):
+def setSettingsData(input_json, REWET_input_data): # noqa: ARG001, N802, N803, D103
policy_file_name = rwhale_input_Data['SystemPerformance'][
'WaterDistributionNetwork'
]['Policy Definition']
@@ -195,7 +195,7 @@ def setSettingsData(input_json, REWET_input_data):
'WaterDistributionNetwork'
]['Policy DefinitionPath']
- policy_config_file = os.path.join(Path(policy_file_path), Path(policy_file_name))
+ policy_config_file = os.path.join(Path(policy_file_path), Path(policy_file_name)) # noqa: PTH118
REWET_input_data['settings']['RUN_TIME'] = rwhale_input_Data[
'SystemPerformance'
@@ -228,7 +228,7 @@ def setSettingsData(input_json, REWET_input_data):
'SystemPerformance'
]['WaterDistributionNetwork']['minimum_job_time']
REWET_input_data['settings']['Restortion_config_file'] = (
- policy_config_file # TODO: SINA unmark it
+ policy_config_file # TODO: SINA unmark it # noqa: TD002
)
p = rwhale_input_Data['SystemPerformance']['WaterDistributionNetwork'][
@@ -349,7 +349,7 @@ def setSettingsData(input_json, REWET_input_data):
# Not Supposed to be in R2DTool GUI ############
REWET_input_data['settings']['minimum_simulation_time'] = (
- 0 # TODO : HERE #REWET_input_data["event_time"] + REWET_input_data["settings"]["simulation_time_step"]
+ 0 # TODO : HERE #REWET_input_data["event_time"] + REWET_input_data["settings"]["simulation_time_step"] # noqa: TD002
)
REWET_input_data['settings']['save_time_step'] = True
REWET_input_data['settings']['record_restoration_agent_logs'] = True
@@ -385,11 +385,11 @@ def setSettingsData(input_json, REWET_input_data):
REWET_input_data['settings']['Pipe_damage_input_method'] = 'pickle'
-def create_path(path):
+def create_path(path): # noqa: D103
if isinstance(path, str):
path = Path(path)
not_existing_hir = []
- while os.path.exists(path) == False:
+ while os.path.exists(path) == False: # noqa: PTH110, E712
not_existing_hir.append(path.name)
path = path.parent
@@ -402,7 +402,7 @@ def create_path(path):
if __name__ == '__main__':
# Setting arg parser
- argParser = argparse.ArgumentParser('Preprocess rwhale workflow to REWET input.')
+ argParser = argparse.ArgumentParser('Preprocess rwhale workflow to REWET input.') # noqa: N816
argParser.add_argument(
'--input',
@@ -436,9 +436,9 @@ def create_path(path):
# learning about parallel or serial settings
- numP = 1
- procID = 0
- doParallel = False
+ numP = 1 # noqa: N816
+ procID = 0 # noqa: N816
+ doParallel = False # noqa: N816
mpi_spec = importlib.util.find_spec('mpi4py')
found = mpi_spec is not None
@@ -446,17 +446,17 @@ def create_path(path):
from mpi4py import MPI
comm = MPI.COMM_WORLD
- numP = comm.Get_size()
- procID = comm.Get_rank()
- if numP < 2:
- doParallel = False
- numP = 1
- procID = 0
- print(
+ numP = comm.Get_size() # noqa: N816
+ procID = comm.Get_rank() # noqa: N816
+ if numP < 2: # noqa: PLR2004
+ doParallel = False # noqa: N816
+ numP = 1 # noqa: N816
+ procID = 0 # noqa: N816
+ print( # noqa: T201
                'Parallel running is not possible. Number of CPUs is not enough.'
)
else:
- doParallel = True
+ doParallel = True # noqa: N816
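
The block above falls back to serial execution whenever mpi4py is absent or only one rank is available. A condensed sketch of the same detection logic, with snake_case names in place of the script's variables:

import importlib.util

num_procs, proc_id, do_parallel = 1, 0, False
if importlib.util.find_spec('mpi4py') is not None:
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    if comm.Get_size() >= 2:
        num_procs, proc_id, do_parallel = comm.Get_size(), comm.Get_rank(), True
    else:
        print('Parallel running is not possible. Number of CPUs is not enough.')
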
# Setting up run settings
@@ -464,7 +464,7 @@ def create_path(path):
REWET_input_data['settings'] = {}
# print(parser_data.input)
- rwhale_input_Data = preprocessorIO.readJSONFile(parser_data.input)
+ rwhale_input_Data = preprocessorIO.readJSONFile(parser_data.input) # noqa: N816
setSettingsData(rwhale_input_Data, REWET_input_data)
event_time = rwhale_input_Data['SystemPerformance']['WaterDistributionNetwork'][
'eventTime'
@@ -486,11 +486,11 @@ def create_path(path):
'WaterDistributionNetwork'
]['ApplicationData']['Realizations']
- REWET_input_data['settings']['result_directory'] = os.path.join(
+ REWET_input_data['settings']['result_directory'] = os.path.join( # noqa: PTH118
run_directory, 'Results', 'WaterDistributionNetwork', 'REWET_Result'
)
- REWET_input_data['settings']['temp_directory'] = os.path.join(
+ REWET_input_data['settings']['temp_directory'] = os.path.join( # noqa: PTH118
run_directory, 'Results', 'WaterDistributionNetwork', 'REWET_RunFiles'
)
@@ -502,7 +502,7 @@ def create_path(path):
damage_save_path_hir = damage_save_path
create_path(damage_save_path_hir)
- if parser_data.number == None:
+ if parser_data.number == None: # noqa: E711
scneario_list_path = damage_save_path / 'scenario_table.xlsx'
else:
scneario_list_path = (
@@ -526,7 +526,7 @@ def create_path(path):
scenario_table = preprocessorIO.create_scneario_table()
- if parser_data.number == None:
+ if parser_data.number == None: # noqa: E711
Damage_file_name = list(range(number_of_realization))
else:
@@ -557,7 +557,7 @@ def create_path(path):
)
command = (
- 'python '
+ 'python ' # noqa: ISC003
+ 'C:\\Users\\naeim\\Desktop\\REWET\\main.py -j '
+ str(settings_json_file_path)
)
@@ -584,7 +584,7 @@ def create_path(path):
)
system_std_out = sys.stdout
- with open(rewet_log_path, 'w') as log_file:
+ with open(rewet_log_path, 'w') as log_file: # noqa: PTH123
sys.stdout = log_file
REWET_starter = Starter()
REWET_starter.run(settings_json_file_path)
@@ -597,13 +597,13 @@ def create_path(path):
requested_result = ['DL', 'QN']
substitute_ft = {'DL': 'Delivery', 'QN': 'Quantity'}
consistency_time_window = 0 # 7200
- iConsider_leak = False # True
+ iConsider_leak = False # True # noqa: N816
# the following does not matter if iConsider_leak is false
leak_ratio = {'DL': 0.75, 'QN': 0}
sub_asset_list = ['Junction', 'Pipe', 'Reservoir']
- sub_asset_name_to_id = dict()
- sub_asset_id_to_name = dict()
+ sub_asset_name_to_id = dict() # noqa: C408
+ sub_asset_id_to_name = dict() # noqa: C408
for sub_asset in sub_asset_list:
sc_geojson_file = preprocessorIO.readJSONFile(sc_geojson)
sub_asset_data = [
@@ -635,11 +635,11 @@ def create_path(path):
)
)
- for scn_name, row in p.project.scenario_list.iterrows():
+ for scn_name, row in p.project.scenario_list.iterrows(): # noqa: B007
realization_number = int(scn_name.strip('SCN_'))
for single_requested_result in requested_result:
if (
- single_requested_result == 'DL'
+ single_requested_result == 'DL' # noqa: PLR1714
or single_requested_result == 'QN'
):
# Running Output module's method to get DL time series status
@@ -675,12 +675,12 @@ def create_path(path):
res_agg[single_requested_result] = res[
single_requested_result
].to_dict()
- for key in res_agg[single_requested_result].keys():
+ for key in res_agg[single_requested_result].keys(): # noqa: SIM118
res_agg[single_requested_result][key] = [
res_agg[single_requested_result][key]
]
else:
- for key in res_agg[single_requested_result].keys():
+ for key in res_agg[single_requested_result].keys(): # noqa: SIM118
res_agg[single_requested_result][key].append(
res[single_requested_result][key]
)
@@ -695,7 +695,7 @@ def create_path(path):
/ cur_json_file_name
)
- with open(cur_json_file_path) as f:
+ with open(cur_json_file_path) as f: # noqa: PTH123
json_data = json.load(f)
for single_requested_result in requested_result:
@@ -707,10 +707,10 @@ def create_path(path):
'Junction', {}
)
- for junction_name in req_result.keys():
+ for junction_name in req_result.keys(): # noqa: SIM118
junction_id = sub_asset_name_to_id['Junction'][junction_name]
cur_junction = junction_json_data.get(junction_id, {})
- cur_junction_SP = cur_junction.get('SystemPerformance', {})
+ cur_junction_SP = cur_junction.get('SystemPerformance', {}) # noqa: N816
cur_junction_SP[result_key] = float(req_result[junction_name])
cur_junction['SystemPerformance'] = cur_junction_SP
@@ -720,11 +720,11 @@ def create_path(path):
junction_json_data
)
- with open(cur_json_file_path, 'w') as f:
+ with open(cur_json_file_path, 'w') as f: # noqa: PTH123
json_data = json.dump(json_data, f, indent=2)
- res_agg_mean = dict()
- res_agg_std = dict()
+ res_agg_mean = dict() # noqa: C408
+ res_agg_std = dict() # noqa: C408
for single_requested_result in requested_result:
res_agg[single_requested_result] = pd.DataFrame(
res_agg[single_requested_result]
@@ -748,12 +748,12 @@ def create_path(path):
inp_json = preprocessorIO.readJSONFile(sc_geojson)
inp_json = inp_json['features']
for WDNtype in ['Reservoir', 'Junction']:
- json_to_attach = dict()
+ json_to_attach = dict() # noqa: C408
for ft in inp_json:
prop = ft['properties']
if prop['type'] == WDNtype:
- id = str(ft['id'])
- generalInfo = dict()
+ id = str(ft['id']) # noqa: A001
+ generalInfo = dict() # noqa: C408, N816
json_geometry = ft['geometry']
shapely_geometry = geometry.shape(json_geometry)
wkt_geometry = shapely_geometry.wkt
@@ -765,7 +765,7 @@ def create_path(path):
if key == 'id':
continue
generalInfo.update({key: item})
- R2Dres = dict()
+ R2Dres = dict() # noqa: C408
asset_name = sub_asset_id_to_name[WDNtype][id]
for single_requested_result in requested_result:
if asset_name not in res_agg_mean[single_requested_result].index:
@@ -790,7 +790,7 @@ def create_path(path):
{id: {'GeneralInformation': generalInfo, 'R2Dres': R2Dres}}
)
det_json['WaterDistributionNetwork'].update({WDNtype: json_to_attach})
- with open(det_json_path, 'w') as f:
+ with open(det_json_path, 'w') as f: # noqa: PTH123
json.dump(det_json, f, indent=2)
ts_result_json_path = cur_json_file_path = (
@@ -814,6 +814,6 @@ def create_path(path):
i
] = time_series_result[single_requested_result][i].to_dict()
- with open(ts_result_json_path, 'w') as f:
+ with open(ts_result_json_path, 'w') as f: # noqa: PTH123
json.dump(time_series_result_struc, f, indent=2)
- print('here')
+ print('here') # noqa: T201
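
The PTH-prefixed suppressions scattered through this wrapper (PTH100, PTH103, PTH110, PTH118, PTH123) keep the original os.path/os/open calls. The pathlib equivalents ruff prefers are sketched below with illustrative paths:

from pathlib import Path

run_directory = Path('Results')  # illustrative root
damage_input_dir = run_directory / 'WaterDistributionNetwork' / 'damage_input'
damage_input_dir.mkdir(parents=True, exist_ok=True)  # replaces the exists() check + makedirs()
log_path = damage_input_dir / 'rewet_log.txt'
with log_path.open('w') as log_file:  # PTH123 form of open()
    log_file.write('log start\n')
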
diff --git a/modules/systemPerformance/REWET/damage_convertor.py b/modules/systemPerformance/REWET/damage_convertor.py
index 9a2e8bc64..9b70d66e6 100644
--- a/modules/systemPerformance/REWET/damage_convertor.py
+++ b/modules/systemPerformance/REWET/damage_convertor.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
# Copyright (c) 2024 Leland Stanford Junior University
#
@@ -45,7 +45,7 @@
CBIG_int = int(1e9)
-def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geojson):
+def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geojson): # noqa: N802
"""Creates REWET-style piep damage file.
Parameters
@@ -65,8 +65,8 @@ def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geoj
pipe_damage_list : Pandas Series
REWET-style pipe damage file.
- """
- pipe_id_list = [key for key in pipe_damage_data]
+ """ # noqa: D401
+ pipe_id_list = [key for key in pipe_damage_data] # noqa: C416
damage_list = []
damage_time = event_time
@@ -84,17 +84,17 @@ def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geoj
cur_data = pipe_damage_data[pipe_id]
cur_damage = cur_data['Damage']
- cur_demand = cur_data['Demand']
+ cur_demand = cur_data['Demand'] # noqa: F841
aim_data = findAndReadAIMFile(
pipe_id,
- os.path.join('Results', 'WaterDistributionNetwork', 'Pipe'),
+ os.path.join('Results', 'WaterDistributionNetwork', 'Pipe'), # noqa: PTH118
run_dir,
)
material = aim_data['GeneralInformation'].get('Material', None)
- if material == None:
+ if material == None: # noqa: E711
# raise ValueError("Material is none")
material = 'CI'
@@ -110,10 +110,10 @@ def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geoj
if damage_val > 0:
if damage_val == 1:
damage_type = 'leak'
- elif damage_val == 2:
+ elif damage_val == 2: # noqa: PLR2004
damage_type = 'break'
else:
- raise ValueError('The damage type must be eother 1 or 2')
+                raise ValueError('The damage type must be either 1 or 2')  # noqa: EM101, TRY003
else:
continue
@@ -136,10 +136,10 @@ def createPipeDamageInputForREWET(pipe_damage_data, run_dir, event_time, sc_geoj
# REWET_input_data["Pipe_damage_list"] = pipe_damage_list
# REWET_input_data["AIM"] = aim_data
- return pipe_damage_list
+ return pipe_damage_list # noqa: RET504
-def createNodeDamageInputForREWET(node_damage_data, run_dir, event_time):
+def createNodeDamageInputForREWET(node_damage_data, run_dir, event_time): # noqa: N802
"""Creates REWET-style node damage file.
Parameters
@@ -154,8 +154,8 @@ def createNodeDamageInputForREWET(node_damage_data, run_dir, event_time):
node_damage_list : Pandas Series
REWET-style node damage file.
- """
- node_id_list = [key for key in node_damage_data]
+ """ # noqa: D401
+ node_id_list = [key for key in node_damage_data] # noqa: C416
damage_list = []
damage_time = event_time
@@ -172,11 +172,11 @@ def createNodeDamageInputForREWET(node_damage_data, run_dir, event_time):
cur_data = node_damage_data[node_id]
cur_damage = cur_data['Damage']
- cur_demand = cur_data['Demand']
+ cur_demand = cur_data['Demand'] # noqa: F841
aim_data = findAndReadAIMFile(
node_id,
- os.path.join('Results', 'WaterDistributionNetwork', 'Node'),
+ os.path.join('Results', 'WaterDistributionNetwork', 'Node'), # noqa: PTH118
run_dir,
)
@@ -195,10 +195,10 @@ def createNodeDamageInputForREWET(node_damage_data, run_dir, event_time):
data=damage_list, index=[damage_time for val in damage_list], dtype='O'
)
- return node_damage_list
+ return node_damage_list # noqa: RET504
-def createPumpDamageInputForREWET(pump_damage_data, REWET_input_data):
+def createPumpDamageInputForREWET(pump_damage_data, REWET_input_data): # noqa: N802, N803
"""Creates REWET-style pump damage file.
Parameters
@@ -213,8 +213,8 @@ def createPumpDamageInputForREWET(pump_damage_data, REWET_input_data):
pump_damage_list : Pandas Series
REWET-style pump damage file.
- """
- pump_id_list = [key for key in pump_damage_data]
+ """ # noqa: D401
+ pump_id_list = [key for key in pump_damage_data] # noqa: C416
damage_list = []
damage_time = REWET_input_data['event_time']
@@ -247,10 +247,10 @@ def createPumpDamageInputForREWET(pump_damage_data, REWET_input_data):
index=[damage_time for val in damage_list], data=damage_list
)
- return pump_damage_list
+ return pump_damage_list # noqa: RET504
-def createTankDamageInputForREWET(tank_damage_data, REWET_input_data):
+def createTankDamageInputForREWET(tank_damage_data, REWET_input_data): # noqa: N802, N803
"""Creates REWET-style Tank damage file.
Parameters
@@ -265,8 +265,8 @@ def createTankDamageInputForREWET(tank_damage_data, REWET_input_data):
tank_damage_list : Pandas Series
REWET-style tank damage file.
- """
- tank_id_list = [key for key in tank_damage_data]
+ """ # noqa: D401
+ tank_id_list = [key for key in tank_damage_data] # noqa: C416
damage_list = []
damage_time = REWET_input_data['event_time']
@@ -302,10 +302,10 @@ def createTankDamageInputForREWET(tank_damage_data, REWET_input_data):
index=[damage_time for val in damage_list], data=damage_list
)
- return tank_damage_list
+ return tank_damage_list # noqa: RET504
-def findAndReadAIMFile(asset_id, asset_type, run_dir):
+def findAndReadAIMFile(asset_id, asset_type, run_dir): # noqa: N802
"""Finds and read the AIM file for an asset.
Parameters
@@ -322,15 +322,15 @@ def findAndReadAIMFile(asset_id, asset_type, run_dir):
aim_file_data : dict
AIM file data as a dict.
- """
+ """ # noqa: D401
file_path = Path(
run_dir, asset_type, str(asset_id), 'templatedir', f'{asset_id}-AIM.json'
)
aim_file_data = preprocessorIO.readJSONFile(str(file_path))
- return aim_file_data
+ return aim_file_data # noqa: RET504
-def getPumpRetsoreTime(damage_state):
+def getPumpRetsoreTime(damage_state): # noqa: N802
"""NOT USED! WE WILL GET IT FROM PELICUN
Provides the restore time based on HAZUS repair time or any other
@@ -350,10 +350,10 @@ def getPumpRetsoreTime(damage_state):
    Restore time : int
- """
+ """ # noqa: D400
if damage_state == 1:
restore_time = int(3 * 24 * 3600)
- elif damage_state == 2:
+ elif damage_state == 2: # noqa: PLR2004
restore_time = int(7 * 24 * 3600)
else:
restore_time = CBIG_int
@@ -361,7 +361,7 @@ def getPumpRetsoreTime(damage_state):
return restore_time
-def getTankRetsoreTime(tank_type, damage_state):
+def getTankRetsoreTime(tank_type, damage_state): # noqa: ARG001, N802
"""NOT USED! WE WILL GET IT FROM PELICUN
Provides the restore time based on HAZUS repair time or any other
@@ -383,10 +383,10 @@ def getTankRetsoreTime(tank_type, damage_state):
    Restore time : int
- """
+ """ # noqa: D400
if damage_state == 1:
restore_time = int(3 * 24 * 3600)
- elif damage_state == 2:
+ elif damage_state == 2: # noqa: PLR2004
restore_time = int(7 * 24 * 3600)
else:
restore_time = CBIG_int
@@ -394,7 +394,7 @@ def getTankRetsoreTime(tank_type, damage_state):
return restore_time
-def readDamagefile(file_addr, run_dir, event_time, sc_geojson):
+def readDamagefile(file_addr, run_dir, event_time, sc_geojson): # noqa: N802
"""Reads PELICUN damage files and create REWET-Style damage for all
WaterDistributionNetwork elements
@@ -412,8 +412,8 @@ def readDamagefile(file_addr, run_dir, event_time, sc_geojson):
damage_data : dict
Damage data in PELICUN dict format.
- """
- # TODO: Make reading once for each scenario
+ """ # noqa: D205, D400, D401
+ # TODO: Make reading once for each scenario # noqa: TD002
# wn = wntrfr.network.WaterNetworkModel(REWET_input_data["inp_file"] )
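
Two suppression families recur in this converter: C416, where `[key for key in mapping]` is just `list(mapping)`, and RET504, where a value is assigned only to be returned on the next line. A short sketch of both preferred forms, using illustrative data:

pipe_damage_data = {'P1': {'Damage': 1}, 'P2': {'Damage': 2}}

# C416: the comprehension `[key for key in pipe_damage_data]` reduces to
pipe_id_list = list(pipe_damage_data)

# RET504: return the expression directly instead of naming it first.
def total_damage(damage_data):
    return sum(entry['Damage'] for entry in damage_data.values())
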
diff --git a/modules/systemPerformance/REWET/preprocessorIO.py b/modules/systemPerformance/REWET/preprocessorIO.py
index 059814ee6..3b7d0c9a7 100644
--- a/modules/systemPerformance/REWET/preprocessorIO.py
+++ b/modules/systemPerformance/REWET/preprocessorIO.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
# Copyright (c) 2024 Leland Stanford Junior University
#
@@ -42,7 +42,7 @@
import pandas as pd
-def readJSONFile(file_addr):
+def readJSONFile(file_addr): # noqa: N802
"""Reads a json file.
Parameters
@@ -60,14 +60,14 @@ def readJSONFile(file_addr):
data : dict
JSON File data as a dict.
- """
- if not os.path.exists(file_addr):
- raise ValueError('INPUT WHALE FILE is not found.', repr(file_addr))
+ """ # noqa: D401
+ if not os.path.exists(file_addr): # noqa: PTH110
+ raise ValueError('INPUT WHALE FILE is not found.', repr(file_addr)) # noqa: EM101, TRY003
- with open(file_addr) as f:
+ with open(file_addr) as f: # noqa: PTH123
data = json.load(f)
- return data
+ return data # noqa: RET504
# =============================================================================
@@ -106,7 +106,7 @@ def readJSONFile(file_addr):
# =============================================================================
-def save_damage_data(damage_save_path, damage_data, scn_number):
+def save_damage_data(damage_save_path, damage_data, scn_number): # noqa: D103
pipe_damage_data = damage_data['Pipe']
node_damage_data = damage_data['Node']
pump_damage_data = damage_data['Pump']
@@ -117,10 +117,10 @@ def save_damage_data(damage_save_path, damage_data, scn_number):
pump_damage_file_name = f'pump_damage_{scn_number}'
tank_damage_file_name = f'tank_damage_{scn_number}'
- pipe_damage_file_path = os.path.join(damage_save_path, pipe_damage_file_name)
- node_damage_file_path = os.path.join(damage_save_path, node_damage_file_name)
- pump_damage_file_path = os.path.join(damage_save_path, pump_damage_file_name)
- tank_damage_file_path = os.path.join(damage_save_path, tank_damage_file_name)
+ pipe_damage_file_path = os.path.join(damage_save_path, pipe_damage_file_name) # noqa: PTH118
+ node_damage_file_path = os.path.join(damage_save_path, node_damage_file_name) # noqa: PTH118
+ pump_damage_file_path = os.path.join(damage_save_path, pump_damage_file_name) # noqa: PTH118
+ tank_damage_file_path = os.path.join(damage_save_path, tank_damage_file_name) # noqa: PTH118
pipe_damage_data.to_pickle(pipe_damage_file_path)
node_damage_data.to_pickle(node_damage_file_path)
@@ -134,10 +134,10 @@ def save_damage_data(damage_save_path, damage_data, scn_number):
'Tank': tank_damage_file_name,
}
- return damage_file_name_list
+ return damage_file_name_list # noqa: RET504
-def create_scneario_table():
+def create_scneario_table(): # noqa: D103
scenario_table = pd.DataFrame(
dtype='O',
columns=[
@@ -149,16 +149,16 @@ def create_scneario_table():
'Probability',
],
)
- return scenario_table
+ return scenario_table # noqa: RET504
-def update_scenario_table(scenario_table, cur_damage_file_name_list, scn_number):
+def update_scenario_table(scenario_table, cur_damage_file_name_list, scn_number): # noqa: D103
if isinstance(scenario_table, pd.core.frame.DataFrame):
scenario_table = scenario_table.to_dict('records')
elif isinstance(scenario_table, list):
pass
else:
- raise ValueError('This is an unknown behavior.')
+ raise ValueError('This is an unknown behavior.') # noqa: EM101, TRY003, TRY004
new_row = {
'Scenario Name': f'SCN_{scn_number}',
@@ -187,13 +187,13 @@ def save_scenario_table(scenario_table, scenario_table_file_path):
-------
None.
- """
+ """ # noqa: D205, D400, D401
if isinstance(scenario_table, pd.core.frame.DataFrame):
pass
elif isinstance(scenario_table, list):
scenario_table = pd.DataFrame(scenario_table)
else:
- raise ValueError('This is an unknown behavior.')
+ raise ValueError('This is an unknown behavior.') # noqa: EM101, TRY003, TRY004
scenario_table = scenario_table.set_index('Scenario Name')
@@ -202,7 +202,7 @@ def save_scenario_table(scenario_table, scenario_table_file_path):
scenario_table.to_excel(scenario_table_file_path)
-def saveSettingsFile(REWET_input_data, save_directory, prefix):
+def saveSettingsFile(REWET_input_data, save_directory, prefix): # noqa: N802, N803
"""Saves settings data that REWET NEEDs.
Parameters
@@ -214,14 +214,14 @@ def saveSettingsFile(REWET_input_data, save_directory, prefix):
-------
None.
- """
+ """ # noqa: D401
settings = REWET_input_data['settings']
- if prefix == None:
+ if prefix == None: # noqa: E711
settings_file_name = 'settings.json'
else:
settings_file_name = prefix + '_' + 'settings.json'
damage_save_path = save_directory / settings_file_name
- with open(damage_save_path, 'w') as f:
+ with open(damage_save_path, 'w') as f: # noqa: PTH123
json.dump(settings, f, indent=4)
return damage_save_path
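
For reference, a pathlib-based reader matching what the PTH110/PTH123 suppressions in readJSONFile defer. This is a sketch under those assumptions, not the module's actual implementation:

import json
from pathlib import Path

def read_json_file(file_addr):
    path = Path(file_addr)
    if not path.exists():  # PTH110 form of os.path.exists()
        raise ValueError(f'INPUT WHALE FILE is not found: {path!r}')
    with path.open() as f:  # PTH123 form of open()
        return json.load(f)
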
diff --git a/modules/tools/BRAILS/getBRAILSAttributes.py b/modules/tools/BRAILS/getBRAILSAttributes.py
index d325631ac..59528c4ed 100644
--- a/modules/tools/BRAILS/getBRAILSAttributes.py
+++ b/modules/tools/BRAILS/getBRAILSAttributes.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
#
# This file is a part of SimCenter backend applications.
@@ -43,7 +43,7 @@
import sys
from importlib import metadata as importlib_metadata
-print('Initializing BRAILS...')
+print('Initializing BRAILS...') # noqa: T201
# If not installed, install BRAILS, argparse, and requests:
required = {'BRAILS', 'argparse', 'requests'}
@@ -51,9 +51,9 @@
# Detect installed packages using Python-provided importlib.metadata:
for x in importlib_metadata.distributions():
- try:
+ try: # noqa: SIM105
installed.add(x.name)
- except:
+ except: # noqa: S110, PERF203, E722
pass
# If installed packages could not be detected, use importlib_metadata backport:
@@ -61,73 +61,73 @@
import importlib_metadata
for x in importlib_metadata.distributions():
- try:
+ try: # noqa: SIM105
installed.add(x.name)
- except:
+ except: # noqa: S110, PERF203, E722
pass
missing = required - installed
# Install missing packages:
python = sys.executable
if missing:
- print('\nInstalling packages required for running this widget...')
- subprocess.check_call(
+ print('\nInstalling packages required for running this widget...') # noqa: T201
+ subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL
)
- print('Successfully installed the required packages')
+ print('Successfully installed the required packages') # noqa: T201
# If requests and BRAILS were previously installed ensure they are at their latest versions:
-subprocess.check_call(
+subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', 'requests', '-U'], stdout=subprocess.DEVNULL
)
-import requests
+import requests # noqa: E402
-latestBrailsVersion = requests.get('https://pypi.org/pypi/BRAILS/json').json()[
+latestBrailsVersion = requests.get('https://pypi.org/pypi/BRAILS/json').json()[ # noqa: S113, N816
'info'
]['version']
if importlib_metadata.version('BRAILS') != latestBrailsVersion:
- print(
+ print( # noqa: T201
        '\nAn older version of BRAILS was detected. Updating to the latest BRAILS version...'
)
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', 'BRAILS', '-U'], stdout=subprocess.DEVNULL
)
- print('Successfully installed the latest version of BRAILS')
+ print('Successfully installed the latest version of BRAILS') # noqa: T201
# Import packages required for running the latest version of BRAILS:
-import argparse
-import os
-from time import gmtime, strftime
+import argparse # noqa: E402
+import os # noqa: E402
+from time import gmtime, strftime # noqa: E402
-from brails.EnabledAttributes import BldgAttributes
+from brails.EnabledAttributes import BldgAttributes # noqa: E402
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS BldgAttributes and write them in a file:
-def runBrails(outputfile):
+def runBrails(outputfile): # noqa: N802, D103
attributes = BldgAttributes()
- with open(outputfile, 'w') as f:
+ with open(outputfile, 'w') as f: # noqa: PTH123
f.write('\n'.join(attributes))
# Define a way to collect GUI input:
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--outputFile', default=None)
args = parser.parse_args(args)
# Create the folder for the output file, if it does not exist:
- outdir = os.path.abspath(args.outputFile).replace(
+ outdir = os.path.abspath(args.outputFile).replace( # noqa: PTH100
os.path.split(args.outputFile)[-1], ''
)
- os.makedirs(outdir, exist_ok=True)
+ os.makedirs(outdir, exist_ok=True) # noqa: PTH103
# Run BRAILS with the user-defined arguments:
runBrails(args.outputFile)
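The bootstrap pattern suppressed above (SIM105, S110, E722, S113) has idiomatic replacements; a condensed sketch keeping the same package set, under the assumption that the behavior should stay identical:

import contextlib
import subprocess
import sys
from importlib import metadata as importlib_metadata

required = {'BRAILS', 'argparse', 'requests'}
installed = set()
for dist in importlib_metadata.distributions():
    # SIM105: contextlib.suppress replaces try/except/pass, and naming
    # Exception (instead of a bare except) satisfies E722/S110 as well
    with contextlib.suppress(Exception):
        installed.add(dist.name)

missing = required - installed
if missing:
    subprocess.check_call(
        [sys.executable, '-m', 'pip', 'install', *missing],
        stdout=subprocess.DEVNULL,
    )

import requests  # noqa: E402 -- must follow the install step above

# S113: requests calls should always carry an explicit timeout
pypi_meta = requests.get('https://pypi.org/pypi/BRAILS/json', timeout=30).json()
latest_brails_version = pypi_meta['info']['version']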
diff --git a/modules/tools/BRAILS/getBRAILSBaselineInv.py b/modules/tools/BRAILS/getBRAILSBaselineInv.py
index 54833df4b..e76b2ff35 100644
--- a/modules/tools/BRAILS/getBRAILSBaselineInv.py
+++ b/modules/tools/BRAILS/getBRAILSBaselineInv.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
#
# This file is a part of SimCenter backend applications.
@@ -49,43 +49,43 @@
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS FootprintHandler and NSIParser:
-def runBrails(
- latMin,
- latMax,
- longMin,
- longMax,
- locationStr,
- fpSrc,
- invInp,
- invAttrMap,
- outputDataType,
+def runBrails( # noqa: N802, D103
+ latMin, # noqa: N803
+ latMax, # noqa: N803
+ longMin, # noqa: N803
+ longMax, # noqa: N803
+ locationStr, # noqa: N803
+ fpSrc, # noqa: N803
+ invInp, # noqa: N803
+ invAttrMap, # noqa: N803
+ outputDataType, # noqa: N803
outputfile,
lengthunit,
):
# Initialize FootprintHandler:
- fpHandler = FootprintHandler()
+ fpHandler = FootprintHandler() # noqa: N806
if locationStr == '""':
- locationStr = ''
+ locationStr = '' # noqa: N806
if invInp == 'NSI':
- nsiParser = NSIParser()
+ nsiParser = NSIParser() # noqa: N806
# Format location input based on the GUI input:
if 'geojson' in fpSrc.lower() or 'csv' in fpSrc.lower():
location = fpSrc
- fpSrc = 'osm'
- fpUserSpecified = True
+ fpSrc = 'osm' # noqa: N806
+ fpUserSpecified = True # noqa: N806
elif locationStr == '':
location = (longMin, latMin, longMax, latMax)
- fpUserSpecified = False
+ fpUserSpecified = False # noqa: N806
else:
location = locationStr
- fpUserSpecified = False
+ fpUserSpecified = False # noqa: N806
# Get raw NSI data:
if outputDataType == 'raw':
@@ -95,9 +95,9 @@ def runBrails(
(
bpoly,
_,
- ) = fpHandler._FootprintHandler__bbox2poly(location)
+ ) = fpHandler._FootprintHandler__bbox2poly(location) # noqa: SLF001
else:
- bpoly, _, _ = fpHandler._FootprintHandler__fetch_roi(location)
+ bpoly, _, _ = fpHandler._FootprintHandler__fetch_roi(location) # noqa: SLF001
nsiParser.GetRawDataROI(bpoly, outputfile)
else:
fpHandler.fetch_footprint_data(
@@ -129,7 +129,7 @@ def runBrails(
# Define a way to collect GUI input:
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--latMin', default=None, type=float)
parser.add_argument('--latMax', default=None, type=float)
@@ -146,10 +146,10 @@ def main(args):
args = parser.parse_args(args)
# Create the folder for the user-defined output directory, if it does not exist:
- outdir = os.path.abspath(args.outputFile).replace(
+ outdir = os.path.abspath(args.outputFile).replace( # noqa: PTH100
os.path.split(args.outputFile)[-1], ''
)
- os.makedirs(outdir, exist_ok=True)
+ os.makedirs(outdir, exist_ok=True) # noqa: PTH103
# Run BRAILS with the user-defined arguments:
runBrails(
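On the SLF001 suppressions above: __bbox2poly and __fetch_roi carry double leading underscores, so Python name-mangles them to _FootprintHandler__bbox2poly and _FootprintHandler__fetch_roi, and that mangled spelling is the only way to reach them from outside the class. A self-contained illustration of the mechanism:

class Demo:
    def __hidden(self):  # double leading underscore triggers name mangling
        return 'reached'

d = Demo()
# Inside the class body self.__hidden() works, but externally the
# attribute only exists under its mangled name -- the exact access
# pattern SLF001 flags on fpHandler above:
print(d._Demo__hidden())  # prints 'reached'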
diff --git a/modules/tools/BRAILS/getBRAILSFootprints.py b/modules/tools/BRAILS/getBRAILSFootprints.py
index 0a66a0050..007ec2beb 100644
--- a/modules/tools/BRAILS/getBRAILSFootprints.py
+++ b/modules/tools/BRAILS/getBRAILSFootprints.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
#
# This file is a part of SimCenter backend applications.
@@ -48,32 +48,32 @@
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS FootprintHandler:
-def runBrails(
- latMin,
- latMax,
- longMin,
- longMax,
- locationStr,
- fpSrc,
- fpSourceAttrMap,
+def runBrails( # noqa: N802, D103
+ latMin, # noqa: N803
+ latMax, # noqa: N803
+ longMin, # noqa: N803
+ longMax, # noqa: N803
+ locationStr, # noqa: N803
+ fpSrc, # noqa: N803
+ fpSourceAttrMap, # noqa: N803
outputfile,
lengthunit,
):
# Initialize FootprintHandler:
- fpHandler = FootprintHandler()
+ fpHandler = FootprintHandler() # noqa: N806
if locationStr == '""':
- locationStr = ''
+ locationStr = '' # noqa: N806
# Format location input based on the GUI input:
if 'geojson' in fpSrc.lower() or 'csv' in fpSrc.lower():
location = fpSrc
- fpSrc = 'osm'
+ fpSrc = 'osm' # noqa: N806
elif locationStr == '':
location = (longMin, latMin, longMax, latMax)
else:
@@ -95,7 +95,7 @@ def runBrails(
# Define a way to collect GUI input:
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--latMin', default=None, type=float)
parser.add_argument('--latMax', default=None, type=float)
@@ -110,10 +110,10 @@ def main(args):
args = parser.parse_args(args)
# Create the folder for the user-defined output directory, if it does not exist:
- outdir = os.path.abspath(args.outputFile).replace(
+ outdir = os.path.abspath(args.outputFile).replace( # noqa: PTH100
os.path.split(args.outputFile)[-1], ''
)
- os.makedirs(outdir, exist_ok=True)
+ os.makedirs(outdir, exist_ok=True) # noqa: PTH103
# Run BRAILS FootprintHandler with the user-defined arguments:
runBrails(
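The PTH100/PTH103 pair recurs in every main() in these tools. Besides being pathlib-idiomatic, the replacement sketched below also avoids the str.replace() trick, which can strip too much when the filename happens to appear inside a directory component (helper name assumed):

from pathlib import Path

def ensure_output_dir(output_file):
    """Create and return the parent directory of output_file."""
    # PTH100: Path.resolve() replaces os.path.abspath()
    # PTH103: Path.mkdir(parents=True, ...) replaces os.makedirs()
    outdir = Path(output_file).resolve().parent
    outdir.mkdir(parents=True, exist_ok=True)
    return outdir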
diff --git a/modules/tools/BRAILS/getBRAILSLocationBoundary.py b/modules/tools/BRAILS/getBRAILSLocationBoundary.py
index e95cbd3e7..93190ff31 100644
--- a/modules/tools/BRAILS/getBRAILSLocationBoundary.py
+++ b/modules/tools/BRAILS/getBRAILSLocationBoundary.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2024 The Regents of the University of California
#
# This file is a part of SimCenter backend applications.
@@ -48,28 +48,28 @@
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS FootprintHandler:
-def runBrails(latMin, latMax, longMin, longMax, locationStr, outputfile):
+def runBrails(latMin, latMax, longMin, longMax, locationStr, outputfile): # noqa: N802, N803, D103
# Initialize FootprintHandler:
- fpHandler = FootprintHandler()
+ fpHandler = FootprintHandler() # noqa: N806
if locationStr == '""':
- locationStr = ''
+ locationStr = '' # noqa: N806
# Run FootprintHandler to generate the boundary GeoJSON file for the entered location:
if locationStr == '':
- fpHandler._FootprintHandler__bbox2poly(
+ fpHandler._FootprintHandler__bbox2poly( # noqa: SLF001
(longMin, latMin, longMax, latMax), outfile=outputfile
)
else:
- fpHandler._FootprintHandler__fetch_roi(locationStr, outfile=outputfile)
+ fpHandler._FootprintHandler__fetch_roi(locationStr, outfile=outputfile) # noqa: SLF001
# Define a way to collect GUI input:
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--latMin', default=None, type=float)
parser.add_argument('--latMax', default=None, type=float)
@@ -81,10 +81,10 @@ def main(args):
args = parser.parse_args(args)
# Create the folder for the user-defined output directory, if it does not exist:
- outdir = os.path.abspath(args.outputFile).replace(
+ outdir = os.path.abspath(args.outputFile).replace( # noqa: PTH100
os.path.split(args.outputFile)[-1], ''
)
- os.makedirs(outdir, exist_ok=True)
+ os.makedirs(outdir, exist_ok=True) # noqa: PTH103
# Run BRAILS FootprintHandler with the user-defined arguments:
runBrails(
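N802, N803, and N806 all point at the same PEP 8 convention: snake_case for function, parameter, and local-variable names. A conforming signature for the boundary helper would read as follows (naming only; the body is schematic):

def run_brails(lat_min, lat_max, long_min, long_max, location_str, outputfile):
    # N806-clean locals use snake_case too
    location = location_str or (long_min, lat_min, long_max, lat_max)
    return location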
diff --git a/modules/tools/BRAILS/runBrails.py b/modules/tools/BRAILS/runBrails.py
index 4755a9f61..39d15402b 100644
--- a/modules/tools/BRAILS/runBrails.py
+++ b/modules/tools/BRAILS/runBrails.py
@@ -1,4 +1,4 @@
-#
+# # noqa: INP001, D100
# Copyright (c) 2023 The Regents of the University of California
#
# This file is a part of SimCenter backend applications.
@@ -48,67 +48,67 @@
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS InventoryGenerator:
-def runBrails(
- latMin,
- latMax,
- longMin,
- longMax,
- locationStr,
- lengthUnit,
- fpSource,
- fpAttrMap,
- invInput,
- invAttributeMap,
- attrRequested,
- outputFile,
+def runBrails( # noqa: N802, D103, PLR0913
+ latMin, # noqa: N803
+ latMax, # noqa: N803
+ longMin, # noqa: N803
+ longMax, # noqa: N803
+ locationStr, # noqa: N803
+ lengthUnit, # noqa: N803
+ fpSource, # noqa: N803
+ fpAttrMap, # noqa: N803
+ invInput, # noqa: N803
+ invAttributeMap, # noqa: N803
+ attrRequested, # noqa: N803
+ outputFile, # noqa: N803
seed,
- numBuildings,
- getAllBuildings,
- gKey,
+ numBuildings, # noqa: N803
+ getAllBuildings, # noqa: N803
+ gKey, # noqa: N803
):
# Format location input based on the GUI input:
if locationStr == '""':
- locationStr = ''
+ locationStr = '' # noqa: N806
if 'geojson' in fpSource.lower():
- locationInp = fpSource
- fpSource = 'osm'
+ locationInp = fpSource # noqa: N806
+ fpSource = 'osm' # noqa: N806
elif locationStr == '':
- locationInp = (longMin, latMin, longMax, latMax)
+ locationInp = (longMin, latMin, longMax, latMax) # noqa: N806
else:
- locationInp = locationStr
+ locationInp = locationStr # noqa: N806
# Parse baseline inventory input from GUI collected values:
if invInput == 'None':
- baselineInvInp = ''
+ baselineInvInp = '' # noqa: N806
elif invInput == 'NSI':
- baselineInvInp = 'nsi'
+ baselineInvInp = 'nsi' # noqa: N806
else:
- baselineInvInp = invInput
+ baselineInvInp = invInput # noqa: N806
# Get attribute map input by processing the GUI input:
if baselineInvInp and invAttributeMap:
- attrmapInp = invAttributeMap
+ attrmapInp = invAttributeMap # noqa: N806
elif fpAttrMap:
- attrmapInp = fpAttrMap
+ attrmapInp = fpAttrMap # noqa: N806
else:
- attrmapInp = ''
+ attrmapInp = '' # noqa: N806
# Format number of buildings and requested attributes inputs by parsing the
# GUI input:
if getAllBuildings:
- numBuildings = 'all'
+ numBuildings = 'all' # noqa: N806
if attrRequested not in ['all', 'hazuseq']:
- attrRequested = attrRequested.split(',')
+ attrRequested = attrRequested.split(',') # noqa: N806
# Initialize InventoryGenerator:
- invGenerator = InventoryGenerator(
+ invGenerator = InventoryGenerator( # noqa: N806
location=locationInp,
fpSource=fpSource,
baselineInv=baselineInvInp,
@@ -127,7 +127,7 @@ def runBrails(
# Define a way to collect GUI input:
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--latMin', default=None, type=float)
parser.add_argument('--latMax', default=None, type=float)
@@ -149,10 +149,10 @@ def main(args):
args = parser.parse_args(args)
# Create the folder for the user-defined output directory, if it does not exist:
- outdir = os.path.abspath(args.outputFile).replace(
+ outdir = os.path.abspath(args.outputFile).replace( # noqa: PTH100
os.path.split(args.outputFile)[-1], ''
)
- os.makedirs(outdir, exist_ok=True)
+ os.makedirs(outdir, exist_ok=True) # noqa: PTH103
# Run BRAILS InventoryGenerator with the user-defined arguments:
runBrails(
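PLR0913 fires here because this runBrails() takes sixteen parameters. One conventional remedy is to group values that always travel together — the bounding-box corners in this case — into a small dataclass; a sketch, not a drop-in replacement:

from dataclasses import dataclass

@dataclass
class BoundingBox:
    """The four corner coordinates passed around together above."""
    lat_min: float
    lat_max: float
    long_min: float
    long_max: float

    def as_tuple(self):
        return (self.long_min, self.lat_min, self.long_max, self.lat_max)

def run_brails(bbox, location_str='', **options):
    """Same behavior, far fewer positional parameters."""
    location = location_str or bbox.as_tuple()
    return location, options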
diff --git a/modules/tools/BRAILS/runBrailsTransp.py b/modules/tools/BRAILS/runBrailsTransp.py
index 68bab0903..b7c3cf418 100644
--- a/modules/tools/BRAILS/runBrailsTransp.py
+++ b/modules/tools/BRAILS/runBrailsTransp.py
@@ -1,4 +1,4 @@
-# Import packages needed for setting up required packages:
+# Import packages needed for setting up required packages: # noqa: INP001, D100
import importlib.metadata
import subprocess
import sys
@@ -7,74 +7,74 @@
required = {'BRAILS', 'argparse', 'requests'}
installed = set()
for x in importlib.metadata.distributions():
- try:
+ try: # noqa: SIM105
installed.add(x.name)
- except:
+ except: # noqa: S110, PERF203, E722
pass
missing = required - installed
python = sys.executable
if missing:
- print('\nInstalling packages required for running this widget...')
- subprocess.check_call(
+ print('\nInstalling packages required for running this widget...') # noqa: T201
+ subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL
)
- print('Successfully installed the required packages')
+ print('Successfully installed the required packages') # noqa: T201
# If BRAILS was previously installed, ensure it is the latest version:
-import requests
+import requests # noqa: E402
-latestBrailsVersion = requests.get('https://pypi.org/pypi/BRAILS/json').json()[
+latestBrailsVersion = requests.get('https://pypi.org/pypi/BRAILS/json').json()[ # noqa: S113, N816
'info'
]['version']
if importlib.metadata.version('BRAILS') != latestBrailsVersion:
- print(
+ print( # noqa: T201
'\nAn older version of BRAILS was detected. Updating to the latest BRAILS version...'
)
- subprocess.check_call(
+ subprocess.check_call( # noqa: S603
[python, '-m', 'pip', 'install', 'BRAILS', '-U'], stdout=subprocess.DEVNULL
)
- print('Successfully installed the latest version of BRAILS')
+ print('Successfully installed the latest version of BRAILS') # noqa: T201
# Import packages required for running the latest version of BRAILS:
-import argparse
-import os
-from time import gmtime, strftime
+import argparse # noqa: E402
+import os # noqa: E402
+from time import gmtime, strftime # noqa: E402
-from brails.TranspInventoryGenerator import TranspInventoryGenerator
+from brails.TranspInventoryGenerator import TranspInventoryGenerator # noqa: E402
-def str2bool(v):
+def str2bool(v): # noqa: D103
# courtesy of Maxim @ stackoverflow
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 'True', 't', 'y', '1'):
return True
- elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'):
+ elif v.lower() in ('no', 'false', 'False', 'f', 'n', '0'): # noqa: RET505
return False
else:
- raise argparse.ArgumentTypeError('Boolean value expected.')
+ raise argparse.ArgumentTypeError('Boolean value expected.') # noqa: EM101, TRY003
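Two side notes on str2bool: the 'True' and 'False' entries in the membership tuples are dead, since v.lower() is always lowercase; and on Python 3.9+ argparse ships BooleanOptionalAction, which makes a hand-rolled converter unnecessary. A minimal sketch:

import argparse

parser = argparse.ArgumentParser()
# Generates paired --minimumHAZUS / --no-minimumHAZUS flags automatically
parser.add_argument('--minimumHAZUS', action=argparse.BooleanOptionalAction, default=True)
args = parser.parse_args(['--no-minimumHAZUS'])
print(args.minimumHAZUS)  # prints False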
# Define a standard way of printing program outputs:
-def log_msg(msg):
+def log_msg(msg): # noqa: D103
formatted_msg = '{} {}'.format(strftime('%Y-%m-%dT%H:%M:%SZ', gmtime()), msg)
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
# Define a way to call BRAILS TranspInventoryGenerator:
-def runBrails(
- latMin,
- latMax,
- longMin,
- longMax,
- minimumHAZUS,
- maxRoadLength,
- lengthUnit,
+def runBrails( # noqa: N802, D103
+ latMin, # noqa: N803
+ latMax, # noqa: N803
+ longMin, # noqa: N803
+ longMax, # noqa: N803
+ minimumHAZUS, # noqa: N803
+ maxRoadLength, # noqa: N803
+ lengthUnit, # noqa: N803
):
# Initialize TranspInventoryGenerator:
- invGenerator = TranspInventoryGenerator(
+ invGenerator = TranspInventoryGenerator( # noqa: N806
location=(longMin, latMin, longMax, latMax)
)
@@ -87,7 +87,7 @@ def runBrails(
)
-def main(args):
+def main(args): # noqa: D103
parser = argparse.ArgumentParser()
parser.add_argument('--latMin', default=None, type=float)
parser.add_argument('--latMax', default=None, type=float)
@@ -103,7 +103,7 @@ def main(args):
args = parser.parse_args(args)
# Change the current directory to the user-defined output folder:
- os.makedirs(args.outputFolder, exist_ok=True)
+ os.makedirs(args.outputFolder, exist_ok=True) # noqa: PTH103
os.chdir(args.outputFolder)
# Run BRAILS TranspInventoryGenerator with the user-defined arguments: