diff --git a/modules/Workflow/WorkflowApplications.json b/modules/Workflow/WorkflowApplications.json index 117f2d51c..465b073a7 100644 --- a/modules/Workflow/WorkflowApplications.json +++ b/modules/Workflow/WorkflowApplications.json @@ -285,12 +285,12 @@ "ApplicationSpecificInputs": [ { "id": "Directory", - "type": "path", + "type": "string", "description": "Path to file containing folder of shake maps" }, { "id": "EventPath", - "type": "string", + "type": "path", "description": "Path to the shake map event" }, { @@ -552,6 +552,11 @@ } ] }, + { + "Name": "MPMEvent", + "ExecutablePath": "applications/createEVENT/MPMEvent/MPMEvent.py", + "ApplicationSpecificInputs": [] + }, { "Name": "StochasticWave", "ExecutablePath": "applications/createEVENT/stochasticWave/StochasticWave.py", @@ -1274,4 +1279,4 @@ } ] } -} \ No newline at end of file +} diff --git a/modules/createEVENT/CMakeLists.txt b/modules/createEVENT/CMakeLists.txt index eb5d33021..8150a2880 100644 --- a/modules/createEVENT/CMakeLists.txt +++ b/modules/createEVENT/CMakeLists.txt @@ -27,6 +27,7 @@ add_subdirectory(physicsBasedMotion) add_subdirectory(M9) add_subdirectory(Istanbul) add_subdirectory(MPM) +add_subdirectory(MPMEvent) add_subdirectory(stochasticWave) add_subdirectory(TaichiEvent) add_subdirectory(CelerisTaichiEvent) diff --git a/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py b/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py index ee02c9daa..9cb60212a 100644 --- a/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py +++ b/modules/createEVENT/IsolatedBuildingCFD/post_process_output.py @@ -259,7 +259,7 @@ def read_pressure_data(file_names): # index += 1 except: # noqa: E722 - # sys.exit('Fatal Error!: the pressure filese have time gap') + # sys.exit('Fatal Error!: the pressure files have time gap') index = 0 # Joint them even if they have a time gap connected_time = np.concatenate((connected_time, time2[index:])) diff --git a/modules/createEVENT/MPMEvent/CMakeLists.txt b/modules/createEVENT/MPMEvent/CMakeLists.txt new file mode 100644 index 000000000..64c9a83fd --- /dev/null +++ b/modules/createEVENT/MPMEvent/CMakeLists.txt @@ -0,0 +1 @@ +simcenter_add_python_script(SCRIPT MPMEvent.py) diff --git a/modules/createEVENT/MPMEvent/MPMEvent.py b/modules/createEVENT/MPMEvent/MPMEvent.py new file mode 100644 index 000000000..50f790080 --- /dev/null +++ b/modules/createEVENT/MPMEvent/MPMEvent.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 # noqa: D100 + +import argparse +import json +import os +import re +import subprocess +import sys +from fractions import Fraction + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +class FloorForces: # noqa: D101 + def __init__(self): + self.X = [0] + self.Y = [0] + self.Z = [0] + + +def directionToDof(direction): # noqa: N802 + """Converts direction to degree of freedom""" # noqa: D400, D401 + directionMap = {'X': 1, 'Y': 2, 'Z': 3} # noqa: N806 + + return directionMap[direction] + + +def addFloorForceToEvent( # noqa: N802 + timeSeriesArray, # noqa: N803 + patternsArray, # noqa: N803 + force, + direction, + floor, + dT, # noqa: N803 +): + """Add force (one component) time series and pattern in the event file""" # noqa: D400 + seriesName = 'HydroForceSeries_' + str(floor) + direction # noqa: N806 + timeSeries = {'name': seriesName, 'dT': dT, 'type': 'Value', 'data': force} # noqa: N806 + + timeSeriesArray.append(timeSeries) + patternName = 'HydroForcePattern_' + str(floor) + direction # noqa: N806 + pattern 
= { + 'name': patternName, + 'timeSeries': seriesName, + 'type': 'HydroFloorLoad', + 'floor': str(floor), + 'dof': directionToDof(direction), + } + + patternsArray.append(pattern) + + +def addFloorForceToEvent(patternsArray, force, direction, floor): # noqa: ARG001, N802, N803, F811 + """Add force (one component) time series and pattern in the event file""" # noqa: D400 + seriesName = 'HydroForceSeries_' + str(floor) + direction # noqa: N806 + patternName = 'HydroForcePattern_' + str(floor) + direction # noqa: N806 + pattern = { + 'name': patternName, + 'timeSeries': seriesName, + 'type': 'HydroFloorLoad', + 'floor': str(floor), + 'dof': directionToDof(direction), + } + + patternsArray.append(pattern) + + +def addFloorPressure(pressureArray, floor): # noqa: N802, N803 + """Add floor pressure in the event file""" # noqa: D400 + floorPressure = {'story': str(floor), 'pressure': [0.0, 0.0]} # noqa: N806 + + pressureArray.append(floorPressure) + + +def writeEVENT(forces, eventFilePath): # noqa: N802, N803 + """This method writes the EVENT.json file""" # noqa: D400, D401, D404 + timeSeriesArray = [] # noqa: N806, F841 + patternsArray = [] # noqa: N806 + pressureArray = [] # noqa: N806 + hydroEventJson = { # noqa: N806 + 'type': 'Hydro', # Using HydroUQ + 'subtype': 'MPMEvent', # Using ClaymoreUW Material Point Method + # "timeSeries": [], # From GeoClawOpenFOAM + 'pattern': patternsArray, + 'pressure': pressureArray, + # "dT": deltaT, # From GeoClawOpenFOAM + 'numSteps': len(forces[0].X), + 'units': {'force': 'Newton', 'length': 'Meter', 'time': 'Sec'}, + } + + # Creating the event dictionary that will be used to export the EVENT json file + eventDict = {'randomVariables': [], 'Events': [hydroEventJson]} # noqa: N806 + + # Adding floor forces + for floorForces in forces: # noqa: N806 + floor = forces.index(floorForces) + 1 + addFloorForceToEvent(patternsArray, floorForces.X, 'X', floor) + addFloorForceToEvent(patternsArray, floorForces.Y, 'Y', floor) + # addFloorPressure(pressureArray, floor) # From GeoClawOpenFOAM + + with open(eventFilePath, 'w', encoding='utf-8') as eventsFile: # noqa: PTH123, N806 + json.dump(eventDict, eventsFile) + + +def GetFloorsCount(BIMFilePath): # noqa: N802, N803, D103 + with open(BIMFilePath, encoding='utf-8') as BIMFile: # noqa: PTH123, N806 + bim = json.load(BIMFile) + return int(bim['GeneralInformation']['stories']) + +def GetExecutableFile(BIMFilePath): # noqa: N802, N803, D103 + filePath = BIMFilePath # noqa: N806 + with open(filePath, encoding='utf-8') as file: # noqa: PTH123 + evt = json.load(file) + file.close # noqa: B018 + + executableNameKey = 'executableFile' + executablePathKey = executableNameKey + 'Path' + + for event in evt['Events']: + executableName = event[executableNameKey] + executablePath = event[executablePathKey] + + return os.path.join(executablePath, executableName) + + defaultExecutablePath = f'{os.path.realpath(os.path.dirname(__file__))}' # noqa: ISC003, PTH120 + defaultExecutableName = 'osu_lwf.exe' + return defaultExecutablePath + defaultExecutableName + +def GetSceneFile(BIMFilePath): # noqa: N802, N803, D103 + filePath = BIMFilePath # noqa: N806 + with open(filePath, encoding='utf-8') as file: # noqa: PTH123 + evt = json.load(file) + file.close # noqa: B018 + + fileNameKey = 'configFile' + filePathKey = fileNameKey + 'Path' + + for event in evt['Events']: + fileName = event[fileNameKey] + filePath = event[filePathKey] + + return os.path.join(filePath, fileName) + + defaultScriptPath = f'{os.path.realpath(os.path.dirname(__file__))}' 
# noqa: ISC003, PTH120 + defaultScriptName = 'scene.json' + return defaultScriptPath + defaultScriptName + + +def GetTimer(BIMFilePath): # noqa: N802, N803, D103 + filePath = BIMFilePath # noqa: N806 + with open(filePath, encoding='utf-8') as file: # noqa: PTH123 + evt = json.load(file) + file.close # noqa: B018 + + timerKey = 'maxMinutes' + + for event in evt['Events']: + timer = event[timerKey] + maxSeconds = timer * 60 + return maxSeconds + + return 0 + + +def main(): # noqa: D103 + """ + Entry point to generate event file using MPMEvent. + """ + return 0 + + +if __name__ == '__main__': + """ + Entry point to generate event file using Stochastic Waves + """ + # CLI parser + parser = argparse.ArgumentParser( + description='Get sample EVENT file produced by StochasticWave' + ) + parser.add_argument( + '-b', + '--filenameAIM', + help='BIM File', + required=True, + default='AIM.json', + ) + parser.add_argument( + '-e', + '--filenameEVENT', + help='Event File', + required=True, + default='EVENT.json', + ) + parser.add_argument('--getRV', help='getRV', required=False, action='store_true') + # parser.add_argument('--filenameSAM', default=None) + + # parsing arguments + arguments, unknowns = parser.parse_known_args() + + # import subprocess + + # Get json of filenameAIM + executableName = GetExecutableFile(arguments.filenameAIM) # noqa: N816 + scriptName = GetSceneFile(arguments.filenameAIM) # noqa: N816 + maxSeconds = GetTimer(arguments.filenameAIM) # noqa: N816 + + if arguments.getRV == True: # noqa: E712 + print('RVs requested') # noqa: T201 + # Read the number of floors + floorsCount = GetFloorsCount(arguments.filenameAIM) # noqa: N816 + filenameEVENT = arguments.filenameEVENT # noqa: N816 + + + result = subprocess.run( # noqa: S603 + [ # noqa: S607 + 'timeout', + str(maxSeconds), + executableName, + '-f', + scriptName, + # f'{os.path.realpath(os.path.dirname(__file__))}' # noqa: ISC003, PTH120 + # + '/taichi_script.py', + ], + stdout=subprocess.PIPE, + check=False, + ) + + forces = [] + for i in range(floorsCount): + forces.append(FloorForces(recorderID=(i + 1))) # noqa: PERF401 + + # write the event file + writeEVENT(forces, filenameEVENT, floorsCount) + + else: + print('No RVs requested') # noqa: T201 + filenameEVENT = arguments.filenameEVENT # noqa: N816 + result = subprocess.run( # noqa: S603 + [ # noqa: S607 + 'timeout', + str(maxSeconds), + executableName, + '-f', + scriptName, + # f'{os.path.realpath(os.path.dirname(__file__))}' # noqa: ISC003, PTH120 + # + '/taichi_script.py', + ], + stdout=subprocess.PIPE, + check=False, + ) + + forces = [] + floorsCount = 1 # noqa: N816 + for i in range(floorsCount): + forces.append(FloorForces(recorderID=(i + 1))) + + # write the event file + writeEVENT(forces, filenameEVENT, floorsCount=floorsCount) + # writeEVENT(forces, arguments.filenameEVENT) diff --git a/modules/createEVENT/MPMEvent/scene.json b/modules/createEVENT/MPMEvent/scene.json new file mode 100644 index 000000000..abd067cde --- /dev/null +++ b/modules/createEVENT/MPMEvent/scene.json @@ -0,0 +1,192 @@ +{ + "simulation": { + "gpuid": 0, + "gravity": [0.0, -9.80665, 0.0], + "fps": 10, + "frames": 320, + "default_dt": 1.0e-4, + "default_dx": 0.05, + "domain": [90, 4.5, 3.65], + "save_path": "./", + "save_suffix": ".bgeo", + "particles_output_exterior_only": false, + "froude_scaling": 1.0 + }, + "bodies": [ + { + "gpu": 0, + "model": 0, + "type": "particles", + "color": "blue", + "constitutive": "JFluid", + "output_attribs": ["ID", "Pressure", "Velocity_X", "Velocity_Y"], + 
"track_particle_id": [0], + "track_attribs": ["Position_X"], + "target_attribs": ["Position_Y"], + "material": { + "type": "particles", + "constitutive": "JFluid", + "ppc": 27.0000, + "CFL": 0.425, + "rho": 998.0, + "bulk_modulus": 2.0e8, + "gamma": 7.125, + "viscosity": 0.001 + }, + "algorithm": { + "type": "particles", + "ppc": 27.0000, + "CFL": 0.425, + "use_FEM": false, + "use_ASFLIP": false, + "use_FBAR": true, + "ASFLIP_alpha": 0.0, + "ASFLIP_beta_min": 0.0, + "ASFLIP_beta_max": 0.0, + "FBAR_psi": 0.98, + "FBAR_fused_kernel": true + }, + "velocity": [0.0, 0.0, 0.0], + "geometry": [ + { + "object": "OSU LWF", + "operation": "Add", + "span": [88.1, 1.85, 0.05], + "offset": [1.9, 0.0, 0.0], + "array": [1, 1, 1], + "spacing": [0.0, 0.0, 0.0], + "track_particle_id": [0] + } + ], + "partition_start": [0.0, 0.0, 0.0], + "partition_end": [89.1, 1.85, 0.05] + }, + { + "gpu": 0, + "model": 1, + "type": "particles", + "constitutive": "FixedCorotated", + "output_attribs": ["ID", "Pressure", "Velocity_X", "Velocity_Y"], + "track_particle_id": [0], + "track_attribs": ["Position_X"], + "target_attribs": ["Position_Y"], + "material": { + "constitutive": "FixedCorotated", + "type": "particles", + "ppc": 27.0000, + "CFL": 0.425, + "rho": 988, + "youngs_modulus": 1e8, + "poisson_ratio": 0.0 + }, + "algorithm": { + "type": "particles", + "ppc": 27.0000, + "CFL": 0.425, + "use_ASFLIP": true, + "use_FEM": false, + "use_FBAR": false, + "ASFLIP_alpha": 0.0, + "ASFLIP_beta_min": 0.0, + "ASFLIP_beta_max": 0.0, + "FBAR_psi": 0.0, + "FBAR_fused_kernel": true + }, + "velocity": [0.0, 0.0, 0.0], + "geometry": [ + { + "object": "Box", + "operation": "Add", + "span": [0.5, 0.1, 0.05], + "offset": [40.3, 1.85, 0.0], + "array": [4, 1, 1], + "spacing": [1.0, 0.0, 0.0], + "track_particle_id": [0] + } + ], + "partition_start": [0.0, 0.0, 0.0], + "partition_end": [89.1, 2.5, 0.025] + } + ], + "boundaries": [ + { + "type": "grid", + "object": "Wall", + "contact": "Separable", + "domain_start": [0.0, 0.0, 0.0], + "domain_end": [90, 4.5, 0.05], + "time": [0, 180] + }, + { + "type": "grid", + "object": "OSU LWF", + "contact": "Separable", + "domain_start": [0.0, 0.0, 0.0], + "domain_end": [90, 4.5, 0.05], + "time": [0, 180] + }, + { + "type": "grid", + "object": "Box", + "contact": "Separable", + "domain_start": [45.79, 2.1, 0.0], + "domain_end": [46.86, 2.715, 1.016], + "time": [0, 180] + }, + { + "type": "grid", + "object": "OSU Paddle", + "contact": "Separable", + "domain_start": [1.6, -0.2, -0.2], + "domain_end": [2.0, 5, 4], + "time": [0, 180], + "file": "wmdisp_hydro2sec_1200hz_smooth_14032023.csv", + "output_frequency": 1200 + } + ], + "grid-sensors": [ + { + "type": "grid", + "attribute": "Force", + "operation": "Sum", + "direction": "X+", + "output_frequency": 120, + "domain_start": [45.790, 2.1, -0.2], + "domain_end": [45.85, 2.615, 2.338] + } + ], + "particle-sensors": [ + { + "type": "particles", + "attribute": "Elevation", + "operation": "Max", + "output_frequency": 120, + "domain_start": [16.0, 1.0, 0.0], + "domain_end": [16.1, 3.6, 1.0] + }, + { + "type": "particles", + "attribute": "Elevation", + "operation": "Max", + "output_frequency": 120, + "domain_start": [34.269, 1.0, 0.0], + "domain_end": [34.369, 3.6, 1.1] + }, + { + "type": "particles", + "attribute": "Elevation", + "operation": "Max", + "output_frequency": 120, + "domain_start": [38.114, 1.0, 0.0], + "domain_end": [38.214, 3.6, 1.1] + }, + { + "type": "particles", + "attribute": "Elevation", + "operation": "Max", + "output_frequency": 120, + 
"domain_start": [45.690, 1.75, 0.0], + "domain_end": [45.790, 3.6, 2.338] + } + ] +} diff --git a/modules/createEVENT/shakeMapEvent/shakeMapEvent.py b/modules/createEVENT/shakeMapEvent/shakeMapEvent.py index 244195009..297b0c8e7 100644 --- a/modules/createEVENT/shakeMapEvent/shakeMapEvent.py +++ b/modules/createEVENT/shakeMapEvent/shakeMapEvent.py @@ -44,12 +44,17 @@ from pathlib import Path +class IntensityMeasureTypeError(Exception): + def __init__(self, im_type): + super().__init__(f'Intensity measure type {im_type} not found in grid data') + + def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103, N803 IMTypesList = eval(IMTypes) # noqa: S307, N806 print('Creating shakemap event') # noqa: T201 - xml_file_path = Path(eventDirectory) / eventPath / 'grid.xml' + xml_file_path = Path(eventPath) / 'grid.xml' # Parse the XML file tree = ET.parse(xml_file_path) # noqa: S314 @@ -62,6 +67,19 @@ def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103, N8 points = [] attributes = [] + # Get the attribute_mapping + namespace = {'ns': 'http://earthquake.usgs.gov/eqcenter/shakemap'} + grid_fields = {} + for grid_field in root.findall('ns:grid_field', namespace): + index = grid_field.get('index') + name = grid_field.get('name') + units = grid_field.get('units') + grid_fields[name] = {'index': index, 'units': units} + attribute_mapping = {} + for im_type in ['PGA', 'PGV', 'MMI', 'PSA03', 'PSA10', 'PSA30']: + if im_type not in grid_fields: + raise IntensityMeasureTypeError(im_type) + attribute_mapping[im_type] = int(grid_fields[im_type]['index']) - 1 # Parse the grid data for line in grid_data.text.strip().split('\n'): values = line.split() @@ -71,15 +89,6 @@ def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103, N8 # Store only the specified attributes attr = {} - attribute_mapping = { - 'PGA': 2, - 'PGV': 3, - 'MMI': 4, - 'PSA03': 5, - 'PSA10': 6, - 'PSA30': 7, - } - for im_type in IMTypesList: if im_type in attribute_mapping: attr[im_type] = float(values[attribute_mapping[im_type]]) @@ -89,11 +98,21 @@ def create_shakemap_event(eventDirectory, eventPath, IMTypes): # noqa: D103, N8 # Create GeoDataFrame gdf = gpd.GeoDataFrame(attributes, geometry=points, crs='EPSG:4326') + if 'PGA' in gdf.columns: + gdf['PGA'] = gdf['PGA'] / 100 # convert from pct g to g + + if 'PSA03' in gdf.columns: + gdf['PSA03'] = gdf['PSA03'] / 100 # convert from pct g to g + gdf = gdf.rename(columns={'PSA03': 'SA_0.3'}) + if 'PSA10' in gdf.columns: + gdf['PSA10'] = gdf['PSA10'] / 100 # convert from pct g to g + gdf = gdf.rename(columns={'PSA10': 'SA_1.0'}) + # Display the first few rows print('Saving shakemap to gpkg') # noqa: T201 # Save as a GeoPackage file - gdf_path = Path(eventDirectory) / 'EventGrid.gpkg' + gdf_path = Path(eventPath) / 'EventGrid.gpkg' gdf.to_file(gdf_path, driver='GPKG') return # noqa: PLR1711 diff --git a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py index f423bcb97..37433796b 100644 --- a/modules/performRegionalMapping/NearestNeighborEvents/NNE.py +++ b/modules/performRegionalMapping/NearestNeighborEvents/NNE.py @@ -56,28 +56,28 @@ def find_neighbors( # noqa: C901, D103 neighbors, filter_label, seed, - doParallel, # noqa: N803 + do_parallel, ): # check if running parallel - numP = 1 # noqa: N806 - procID = 0 # noqa: N806 - runParallel = False # noqa: N806 + num_processes = 1 + process_id = 0 + run_parallel = False - if doParallel == 'True': + if do_parallel == 
'True': mpi_spec = importlib.util.find_spec('mpi4py') found = mpi_spec is not None if found: from mpi4py import MPI - runParallel = True # noqa: N806 + run_parallel = True comm = MPI.COMM_WORLD - numP = comm.Get_size() # noqa: N806 - procID = comm.Get_rank() # noqa: N806 - if numP < 2: # noqa: PLR2004 - doParallel = 'False' # noqa: N806 - runParallel = False # noqa: N806 - numP = 1 # noqa: N806 - procID = 0 # noqa: N806 + num_processes = comm.Get_size() + process_id = comm.Get_rank() + if num_processes < 2: # noqa: PLR2004 + do_parallel = 'False' + run_parallel = False + num_processes = 1 + process_id = 0 # read the event grid data file event_grid_path = Path(event_grid_file).resolve() @@ -91,10 +91,10 @@ def find_neighbors( # noqa: C901, D103 # Existing code for CSV files grid_df = pd.read_csv(event_dir / event_grid_file, header=0) - # store the locations of the grid points in X - lat_E = grid_df['Latitude'] # noqa: N806 - lon_E = grid_df['Longitude'] # noqa: N806 - X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)]) # noqa: N806 + # store the locations of the grid points in grid_locations + lat_e = grid_df['Latitude'] + lon_e = grid_df['Longitude'] + grid_locations = np.array([[lo, la] for lo, la in zip(lon_e, lat_e)]) if filter_label == '': grid_extra_keys = list( @@ -113,10 +113,10 @@ def find_neighbors( # noqa: C901, D103 gdf['Longitude'] = gdf.geometry.x gdf['Latitude'] = gdf.geometry.y - # store the locations of the grid points in X - lat_E = gdf['Latitude'] # noqa: N806 - lon_E = gdf['Longitude'] # noqa: N806 - X = np.array([[lo, la] for lo, la in zip(lon_E, lat_E)]) # noqa: N806 + # store the locations of the grid points in grid_locations + lat_e = gdf['Latitude'] + lon_e = gdf['Longitude'] + grid_locations = np.array([[lo, la] for lo, la in zip(lon_e, lat_e)]) if filter_label == '': grid_extra_keys = list( @@ -128,12 +128,12 @@ def find_neighbors( # noqa: C901, D103 # prepare the tree for the nearest neighbor search if filter_label != '' or len(grid_extra_keys) > 0: - neighbors_to_get = min(neighbors * 10, len(lon_E)) + neighbors_to_get = min(neighbors * 10, len(lon_e)) else: neighbors_to_get = neighbors nbrs = NearestNeighbors(n_neighbors=neighbors_to_get, algorithm='ball_tree').fit( - X + grid_locations ) # load the building data file @@ -141,33 +141,34 @@ def find_neighbors( # noqa: C901, D103 asset_dict = json.load(f) # prepare a dataframe that holds asset filenames and locations - AIM_df = pd.DataFrame( # noqa: N806 + aim_df = pd.DataFrame( columns=['Latitude', 'Longitude', 'file'], index=np.arange(len(asset_dict)) ) count = 0 for i, asset in enumerate(asset_dict): - if runParallel == False or (i % numP) == procID: # noqa: E712 + if run_parallel == False or (i % num_processes) == process_id: # noqa: E712 with open(asset['file'], encoding='utf-8') as f: # noqa: PTH123 asset_data = json.load(f) asset_loc = asset_data['GeneralInformation']['location'] - AIM_df.iloc[count]['Longitude'] = asset_loc['longitude'] - AIM_df.iloc[count]['Latitude'] = asset_loc['latitude'] - AIM_df.iloc[count]['file'] = asset['file'] + aim_id = aim_df.index[count] + aim_df.loc[aim_id, 'Longitude'] = asset_loc['longitude'] + aim_df.loc[aim_id, 'Latitude'] = asset_loc['latitude'] + aim_df.loc[aim_id, 'file'] = asset['file'] count = count + 1 - # store building locations in Y - Y = np.array( # noqa: N806 + # store building locations in bldg_locations + bldg_locations = np.array( [ [lo, la] - for lo, la in zip(AIM_df['Longitude'], AIM_df['Latitude']) + for lo, la in zip(aim_df['Longitude'], 
aim_df['Latitude']) if not np.isnan(lo) and not np.isnan(la) ] ) # collect the neighbor indices and distances for every building - distances, indices = nbrs.kneighbors(Y) + distances, indices = nbrs.kneighbors(bldg_locations) distances = distances + 1e-20 # initialize the random generator @@ -179,11 +180,12 @@ def find_neighbors( # noqa: C901, D103 count = 0 # iterate through the buildings and store the selected events in the AIM - for asset_i, (AIM_id, dist_list, ind_list) in enumerate( # noqa: B007, N806 - zip(AIM_df.index, distances, indices) + for asset_i, (aim_id, dist_list, ind_list) in enumerate( # noqa: B007 + zip(aim_df.index, distances, indices) ): # open the AIM file - asst_file = AIM_df.iloc[AIM_id]['file'] + aim_index_id = aim_df.index[aim_id] + asst_file = aim_df.loc[aim_index_id, 'file'] with open(asst_file, encoding='utf-8') as f: # noqa: PTH123 asset_data = json.load(f) @@ -321,7 +323,8 @@ def find_neighbors( # noqa: C901, D103 for col in grid_df.columns if col not in ['geometry', 'Longitude', 'Latitude'] ] - event_count = len(im_columns) + # event_count = len(im_columns) + event_count = 1 # for each neighbor for sample_j, nbr in enumerate(nbr_samples): @@ -332,26 +335,27 @@ def find_neighbors( # noqa: C901, D103 nbr_index = ind_list[nbr] # For GIS files, create a new CSV file - csv_filename = f'Site_{sample_j}.csv' + csv_filename = f'Site_{nbr_index}.csv' csv_path = event_dir / csv_filename - # Create a CSV file with data from the GIS file - # Use actual data from the GIS file if available, otherwise use dummy data - im_columns = [ - col - for col in grid_df.columns - if col not in ['geometry', 'Longitude', 'Latitude'] - ] - - im_data = pd.DataFrame( - { - col: [grid_df.iloc[nbr_index][col]] * event_count - for col in im_columns - } - ) - - im_data.to_csv(csv_path, index=False) + if not csv_path.exists(): + # Create a CSV file with data from the GIS file + # Use actual data from the GIS file if available, otherwise use dummy data + im_columns = [ + col + for col in grid_df.columns + if col not in ['geometry', 'Longitude', 'Latitude'] + ] + + im_data = pd.DataFrame( + { + col: [grid_df.iloc[nbr_index][col]] * event_count + for col in im_columns + } + ) + + im_data.to_csv(csv_path, index=False) # save the collection file name and the IM row id event_list.append(csv_filename + f'x{event_j}') diff --git a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py index c55830d46..6a31cd9fe 100644 --- a/modules/performSIMULATION/capacitySpectrum/CapacityModels.py +++ b/modules/performSIMULATION/capacitySpectrum/CapacityModels.py @@ -88,6 +88,7 @@ def convert_story_rise(structureType, stories): # noqa: N803 rise = None else: + rise = None # First, check if we have valid story information try: stories = int(stories) @@ -340,7 +341,7 @@ def __init__(self, general_info, dD=0.001): # noqa: N803 self.Dy = self.capacity_data[self.design_level][self.HAZUS_type]['Dy'] self.Ay = self.capacity_data[self.design_level][self.HAZUS_type]['Ay'] except KeyError: - msg = f'No capacity data for {self.HAZUS_type} and {self.design_level}' + msg = f'No capacity data for build class {self.HAZUS_type} and design level {self.design_level}' raise KeyError(msg) # noqa: B904 self.cao_peterson_2006 = cao_peterson_2006( self.Dy, self.Ay, self.Du, self.Au, dD diff --git a/modules/performSIMULATION/capacitySpectrum/DampingModels.py b/modules/performSIMULATION/capacitySpectrum/DampingModels.py index feb455461..916400441 100644 --- 
a/modules/performSIMULATION/capacitySpectrum/DampingModels.py +++ b/modules/performSIMULATION/capacitySpectrum/DampingModels.py @@ -256,6 +256,8 @@ def get_beta(self, Dp, Ap): # noqa: N803 f'The base model {self.base_model} does not have a useful' 'get_kappa method.' ) + if Dp <= 0 or Ap <= 0: + return beta_elastic Du = self.capacity.Du # noqa: N806 Ax = self.capacity.Ax # noqa: N806 B = self.capacity.B # noqa: N806 diff --git a/modules/performSIMULATION/capacitySpectrum/runCMS.py b/modules/performSIMULATION/capacitySpectrum/runCMS.py index c17fb7fba..3533c920f 100644 --- a/modules/performSIMULATION/capacitySpectrum/runCMS.py +++ b/modules/performSIMULATION/capacitySpectrum/runCMS.py @@ -97,10 +97,8 @@ def find_performance_point(cap_x, cap_y, dem_x, dem_y, dd=0.001): elif dem_y_interp[0] < cap_y_interp[0]: perf_x = 0.001 # x_interp[0] perf_y = 0.001 # cap_y_interp[0] - # except IndexError as err: - # print('No performance point found; curves do not intersect.') - # print('IndexError: ') - # print(err) + else: + print('No performance point found; curves do not intersect.') return perf_x, perf_y @@ -398,11 +396,35 @@ def write_RV(AIM_input_path, EVENT_input_path): # noqa: C901, N802, N803, D103 EDP_output = np.concatenate([index, EDP_output], axis=1) # noqa: N806 + # Concatenate the original IM to CMS in case some (e.g., PGD) are needed + EDP_output = np.concatenate([EDP_output, IM_samples], axis = 1) + # prepare the header + header_out = ['1-PFA-0-0', '1-PRD-1-1'] + for h_label in header: + # remove leading and trailing whitespace + h_label = h_label.strip() # noqa: PLW2901 + + # convert suffixes to the loc-dir format used by the SimCenter + if h_label.endswith('_h'): # horizontal + header_out.append(f'1-{h_label[:-2]}-1-1') + + elif h_label.endswith('_v'): # vertical + header_out.append(f'1-{h_label[:-2]}-1-3') + + elif h_label.endswith('_x'): # x direction + header_out.append(f'1-{h_label[:-2]}-1-1') + + elif h_label.endswith('_y'): # y direction + header_out.append(f'1-{h_label[:-2]}-1-2') + + else: # if none of the above is given, default to 1-1 + header_out.append(f'1-{h_label.strip()}-1-1') + + working_dir = Path(PurePath(EVENT_input_path).parent) # working_dir = posixpath.dirname(EVENT_input_path) - # prepare the header - header_out = ['1-PFA-0-0', '1-PRD-1-1'] + np.savetxt( working_dir / 'response.csv', diff --git a/modules/systemPerformance/ResidualDemand/run_residual_demand.py b/modules/systemPerformance/ResidualDemand/run_residual_demand.py index ab099957f..e4261764a 100644 --- a/modules/systemPerformance/ResidualDemand/run_residual_demand.py +++ b/modules/systemPerformance/ResidualDemand/run_residual_demand.py @@ -357,15 +357,26 @@ def aggregate_delay_results(undamaged_time, damaged_time, od_file_pre, od_file_p compare_df.loc[undamaged_time['agent_id'], 'mean_time_used_undamaged'] = ( undamaged_time['data'].mean(axis=1) ) - compare_df.loc[undamaged_time['agent_id'], 'std_time_used_undamaged'] = ( - undamaged_time['data'].std(axis=1) - ) + compare_df.loc[damaged_time['agent_id'], 'mean_time_used_damaged'] = ( damaged_time['data'].mean(axis=1) ) - compare_df.loc[damaged_time['agent_id'], 'std_time_used_damaged'] = damaged_time[ - 'data' - ].std(axis=1) + + std_time_used_undamaged = np.zeros(len(undamaged_time['agent_id'])) + rows_with_inf = np.any(np.isinf(undamaged_time['data']), axis=1) + std_time_used_undamaged[rows_with_inf] = np.nan + std_time_used_undamaged[~rows_with_inf] = undamaged_time['data'][~rows_with_inf].std( + axis=1 + ) + compare_df.loc[undamaged_time['agent_id'], 
'std_time_used_undamaged'] = std_time_used_undamaged + + std_time_used_damaged = np.zeros(len(damaged_time['agent_id'])) + rows_with_inf = np.any(np.isinf(damaged_time['data']), axis=1) + std_time_used_damaged[rows_with_inf] = np.nan + std_time_used_damaged[~rows_with_inf] = damaged_time['data'][~rows_with_inf].std( + axis=1 + ) + compare_df.loc[damaged_time['agent_id'], 'std_time_used_damaged'] = std_time_used_damaged inner_agents = od_df_pre.merge(od_df_post, on='agent_id', how='inner')[ 'agent_id' @@ -385,13 +396,24 @@ def aggregate_delay_results(undamaged_time, damaged_time, od_file_pre, od_file_p - undamaged_time['data'][indices_in_undamaged, :] ) delay_ratio = delay_duration / undamaged_time['data'][indices_in_undamaged, :] + + std_delay_duration = np.zeros(len(inner_agents)) + rows_with_inf = np.any(np.isinf(delay_duration), axis=1) + std_delay_duration[rows_with_inf] = np.nan + std_delay_duration[~rows_with_inf] = delay_duration[~rows_with_inf].std(axis=1) + + std_delay_ratio = np.zeros(len(inner_agents)) + rows_with_inf = np.any(np.isinf(delay_ratio), axis=1) + std_delay_ratio[rows_with_inf] = np.nan + std_delay_ratio[~rows_with_inf] = delay_ratio[~rows_with_inf].std(axis=1) + delay_df = pd.DataFrame( data={ 'agent_id': inner_agents, 'mean_delay_duration': delay_duration.mean(axis=1), 'mean_delay_ratio': delay_ratio.mean(axis=1), - 'std_delay_duration': delay_duration.std(axis=1), - 'std_delay_ratio': delay_ratio.std(axis=1), + 'std_delay_duration': std_delay_duration, + 'std_delay_ratio': std_delay_ratio, } ) @@ -770,6 +792,7 @@ def run_one_realization( trip_info_compare['delay_duration'] / trip_info_compare['travel_time_used_undamaged'] ) + trip_info_compare = trip_info_compare.replace([np.inf, -np.inf], 'inf') trip_info_compare.to_csv('trip_info_compare.csv', index=False) return True @@ -868,6 +891,8 @@ def run_residual_demand( # noqa: C901 edges_gdf['capacity'] = edges_gdf['lanes'] * 1800 edges_gdf['normal_capacity'] = edges_gdf['capacity'] edges_gdf['normal_maxspeed'] = edges_gdf['maxspeed'] + edges_gdf['start_nid'] = edges_gdf['start_nid'].astype(int) + edges_gdf['end_nid'] = edges_gdf['end_nid'].astype(int) # edges_gdf['fft'] = edges_gdf['length']/edges_gdf['maxspeed'] * 2.23694 edges_gdf.to_csv('edges.csv', index=False) nodes_gdf = gpd.read_file(node_geojson) diff --git a/modules/systemPerformance/ResidualDemand/transportation.py b/modules/systemPerformance/ResidualDemand/transportation.py index 8f54702ca..d5e3ef17d 100644 --- a/modules/systemPerformance/ResidualDemand/transportation.py +++ b/modules/systemPerformance/ResidualDemand/transportation.py @@ -65,13 +65,39 @@ from sklearn.feature_extraction.text import TfidfVectorizer from scipy.spatial.distance import cdist from shapely.wkt import loads -from brails.utils.geoTools import haversine_dist -from brails.workflow.TransportationElementHandler import ( - ROADLANES_MAP, - ROADSPEED_MAP, - calculate_road_capacity, -) +# from brails.utils.geoTools import haversine_dist +# from brails.workflow.TransportationElementHandler import ( +# ROADLANES_MAP, +# ROADSPEED_MAP, +# calculate_road_capacity, +# ) + +## The functions below are manually copied from brails to avoid importing brails + +ROADLANES_MAP = {'S1100': 4, "S1200": 2, "S1400": 1, "S1500": 1, "S1630": 1, + "S1640": 1, "S1710": 1, "S1720": 1, "S1730": 1, "S1740": 1, + "S1750": 1, "S1780": 1, "S1810": 1, "S1820": 1, "S1830": 1} + +ROADSPEED_MAP = {'S1100': 70, "S1200": 55, "S1400": 25, "S1500": 25, + "S1630": 25, "S1640": 25, "S1710": 25, "S1720": 25, + "S1730": 25,
"S1740": 10, "S1750": 10, "S1780": 10, + "S1810": 10, "S1820": 10, "S1830": 10} +def calculate_road_capacity(nlanes: int, + traffic_volume_per_lane: int = 1800 + ) -> int: + """ + Calculate road capacity from number of lanes & traffic volume/lane. + + Parameters__ + nlanes (int): The number of lanes on the road. + traffic_volume_per_lane (int, optional): The traffic volume + capacity per lane. Default is 1800 vehicles. + Returns__ + int: The total road capacity, which is the product of the number + of lanes and the traffic volume per lane. + """ + return nlanes * traffic_volume_per_lane class TransportationPerformance(ABC): # noqa: B024 """ @@ -393,577 +419,538 @@ def get_graph_network(self, csv_file_dir) -> None: # noqa: D102 nodes_file = os.path.join(csv_file_dir, self.csv_files['network_nodes']) # noqa: PTH118 nodes_gpd.to_csv(nodes_file, index=False) return # noqa: PLR1711 + def substep_assignment( # noqa: PLR0913 + self, + nodes_df=None, + weighted_edges_df=None, + od_ss=None, + quarter_demand=None, + assigned_demand=None, + quarter_counts=4, + trip_info=None, + agent_time_limit=0, + sample_interval=1, + agents_path=None, + hour=None, + quarter=None, + ss_id=None, + alpha_f=0.3, + beta_f=3, + two_way_edges=False, # noqa: FBT002 + ): + # open_edges_df = weighted_edges_df.loc[weighted_edges_df['fft'] < 36000] # noqa: PLR2004 + open_edges_df = weighted_edges_df + + net = pdna.Network( + nodes_df['x'], + nodes_df['y'], + open_edges_df['start_nid'], + open_edges_df['end_nid'], + open_edges_df[['weight']], + twoway=two_way_edges, + ) - # @abstractmethod - def system_performance( # noqa: C901, D102, PLR0915 - self, state # noqa: ARG002 - ) -> None: # Move the CSV creation here - def substep_assignment( # noqa: PLR0913 - nodes_df=None, - weighted_edges_df=None, - od_ss=None, - quarter_demand=None, - assigned_demand=None, - quarter_counts=4, - trip_info=None, - agent_time_limit=0, - sample_interval=1, - agents_path=None, - hour=None, - quarter=None, - ss_id=None, - alpha_f=0.3, - beta_f=3, - two_way_edges=False, # noqa: FBT002 - ): - open_edges_df = weighted_edges_df.loc[weighted_edges_df['fft'] < 36000] # noqa: PLR2004 - - net = pdna.Network( - nodes_df['x'], - nodes_df['y'], - open_edges_df['start_nid'], - open_edges_df['end_nid'], - open_edges_df[['weight']], - twoway=two_way_edges, - ) - - # print('network') # noqa: RUF100, T201 - net.set(pd.Series(net.node_ids)) - # print('net') # noqa: RUF100, T201 - - nodes_origin = od_ss['origin_nid'].to_numpy() - nodes_destin = od_ss['destin_nid'].to_numpy() - nodes_current = od_ss['current_nid'].to_numpy() - agent_ids = od_ss['agent_id'].to_numpy() - agent_current_links = od_ss['current_link'].to_numpy() - agent_current_link_times = od_ss['current_link_time'].to_numpy() - paths = net.shortest_paths(nodes_current, nodes_destin) - - # check agent time limit - path_lengths = net.shortest_path_lengths(nodes_current, nodes_destin) - remove_agent_list = [] - if agent_time_limit is None: - pass - else: - for agent_idx, agent_id in enumerate(agent_ids): - planned_trip_length = path_lengths[agent_idx] - # agent_time_limit[agent_id] - trip_length_limit = agent_time_limit - if planned_trip_length > trip_length_limit + 0: - remove_agent_list.append(agent_id) - - edge_travel_time_dict = weighted_edges_df['t_avg'].T.to_dict() - edge_current_vehicles = weighted_edges_df['veh_current'].T.to_dict() - edge_quarter_vol = weighted_edges_df['vol_true'].T.to_dict() - # edge_length_dict = weighted_edges_df['length'].T.to_dict() - od_residual_ss_list = [] - # all_paths = [] 
- path_i = 0 - for path in paths: - trip_origin = nodes_origin[path_i] - trip_destin = nodes_destin[path_i] - agent_id = agent_ids[path_i] - # remove some agent (path too long) - if agent_id in remove_agent_list: - path_i += 1 - # no need to update trip info - continue - remaining_time = ( - 3600 / quarter_counts + agent_current_link_times[path_i] - ) - used_time = 0 - for edge_s, edge_e in zip(path, path[1:]): - edge_str = f'{edge_s}-{edge_e}' - edge_travel_time = edge_travel_time_dict[edge_str] - - if (remaining_time > edge_travel_time) and ( - edge_travel_time < 36000 # noqa: PLR2004 - ): - # all_paths.append(edge_str) - # p_dist += edge_travel_time - remaining_time -= edge_travel_time - used_time += edge_travel_time - edge_quarter_vol[edge_str] += 1 * sample_interval - trip_stop = edge_e - - if edge_str == agent_current_links[path_i]: - edge_current_vehicles[edge_str] -= 1 * sample_interval - else: - if edge_str != agent_current_links[path_i]: - edge_current_vehicles[edge_str] += 1 * sample_interval - new_current_link = edge_str - new_current_link_time = remaining_time - trip_stop = edge_s - od_residual_ss_list.append( - [ - agent_id, - trip_origin, - trip_destin, - trip_stop, - new_current_link, - new_current_link_time, - ] - ) - break - trip_info[(agent_id, trip_origin, trip_destin)][0] += ( - 3600 / quarter_counts - ) - trip_info[(agent_id, trip_origin, trip_destin)][1] += used_time - trip_info[(agent_id, trip_origin, trip_destin)][2] = trip_stop - trip_info[(agent_id, trip_origin, trip_destin)][3] = hour - trip_info[(agent_id, trip_origin, trip_destin)][4] = quarter - trip_info[(agent_id, trip_origin, trip_destin)][5] = ss_id + # print('network') # noqa: RUF100, T201 + net.set(pd.Series(net.node_ids)) + # print('net') # noqa: RUF100, T201 + + nodes_origin = od_ss['origin_nid'].to_numpy() + nodes_destin = od_ss['destin_nid'].to_numpy() + nodes_current = od_ss['current_nid'].to_numpy() + agent_ids = od_ss['agent_id'].to_numpy() + agent_current_links = od_ss['current_link'].to_numpy() + agent_current_link_times = od_ss['current_link_time'].to_numpy() + paths = net.shortest_paths(nodes_current, nodes_destin) + + # check agent time limit + path_lengths = net.shortest_path_lengths(nodes_current, nodes_destin) + remove_agent_list = [] + if agent_time_limit is None: + pass + else: + for agent_idx, agent_id in enumerate(agent_ids): + planned_trip_length = path_lengths[agent_idx] + # agent_time_limit[agent_id] + trip_length_limit = agent_time_limit + if planned_trip_length > trip_length_limit + 0: + remove_agent_list.append(agent_id) + + edge_travel_time_dict = weighted_edges_df['t_avg'].T.to_dict() + edge_current_vehicles = weighted_edges_df['veh_current'].T.to_dict() + edge_quarter_vol = weighted_edges_df['vol_true'].T.to_dict() + # edge_length_dict = weighted_edges_df['length'].T.to_dict() + od_residual_ss_list = [] + # all_paths = [] + path_i = 0 + for path in paths: + trip_origin = nodes_origin[path_i] + trip_destin = nodes_destin[path_i] + agent_id = agent_ids[path_i] + # remove some agent (path too long) + if agent_id in remove_agent_list: path_i += 1 - - new_edges_df = weighted_edges_df[ - [ - 'uniqueid', - 'u', - 'v', - 'start_nid', - 'end_nid', - 'fft', - 'capacity', - 'normal_fft', - 'normal_capacity', - 'length', - 'vol_true', - 'vol_tot', - 'veh_current', - 'geometry', - ] - ].copy() - # new_edges_df = new_edges_df.join(edge_volume, how='left') - # new_edges_df['vol_ss'] = new_edges_df['vol_ss'].fillna(0) - # new_edges_df['vol_true'] += new_edges_df['vol_ss'] - 
new_edges_df['vol_true'] = new_edges_df.index.map(edge_quarter_vol) - new_edges_df['veh_current'] = new_edges_df.index.map( - edge_current_vehicles - ) - # new_edges_df['vol_tot'] += new_edges_df['vol_ss'] - new_edges_df['flow'] = ( - new_edges_df['vol_true'] * quarter_demand / assigned_demand - ) * quarter_counts - new_edges_df['t_avg'] = new_edges_df['fft'] * ( - 1 - + alpha_f - * (new_edges_df['flow'] / new_edges_df['capacity']) ** beta_f - ) - new_edges_df['t_avg'] = np.where( - new_edges_df['t_avg'] > 36000, 36000, new_edges_df['t_avg'] # noqa: PLR2004 + # no need to update trip info + continue + remaining_time = ( + 3600 / quarter_counts + agent_current_link_times[path_i] ) - new_edges_df['t_avg'] = new_edges_df['t_avg'].round(2) - - return new_edges_df, od_residual_ss_list, trip_info, agents_path - - def write_edge_vol( - edges_df=None, - simulation_outputs=None, - quarter=None, - hour=None, - scen_nm=None, - ): - if 'flow' in edges_df.columns: - if edges_df.shape[0] < 10: # noqa: PLR2004 - edges_df[ + used_time = 0 + for edge_s, edge_e in zip(path, path[1:]): + edge_str = f'{edge_s}-{edge_e}' + edge_travel_time = edge_travel_time_dict[edge_str] + if (remaining_time > edge_travel_time) and ( + edge_travel_time < 36000 # noqa: PLR2004 + ): + # all_paths.append(edge_str) + # p_dist += edge_travel_time + remaining_time -= edge_travel_time + used_time += edge_travel_time + edge_quarter_vol[edge_str] += 1 * sample_interval + trip_stop = edge_e + if edge_str == agent_current_links[path_i]: + edge_current_vehicles[edge_str] -= 1 * sample_interval + else: + if edge_str != agent_current_links[path_i]: + edge_current_vehicles[edge_str] += 1 * sample_interval + new_current_link = edge_str + new_current_link_time = remaining_time + trip_stop = edge_s + od_residual_ss_list.append( [ - 'uniqueid', - 'start_nid', - 'end_nid', - 'capacity', - 'veh_current', - 'vol_true', - 'vol_tot', - 'flow', - 't_avg', - 'geometry', + agent_id, + trip_origin, + trip_destin, + trip_stop, + new_current_link, + new_current_link_time, ] - ].to_csv( - f'{simulation_outputs}/edge_vol/edge_vol_hr{hour}_' - f'qt{quarter}_{scen_nm}.csv', - index=False, ) + break + trip_info[(agent_id, trip_origin, trip_destin)][0] += ( + 3600 / quarter_counts + ) + trip_info[(agent_id, trip_origin, trip_destin)][1] += used_time + trip_info[(agent_id, trip_origin, trip_destin)][2] = trip_stop + trip_info[(agent_id, trip_origin, trip_destin)][3] = hour + trip_info[(agent_id, trip_origin, trip_destin)][4] = quarter + trip_info[(agent_id, trip_origin, trip_destin)][5] = ss_id + path_i += 1 + + new_edges_df = weighted_edges_df[ + [ + 'uniqueid', + 'u', + 'v', + 'start_nid', + 'end_nid', + 'fft', + 'capacity', + 'normal_fft', + 'normal_capacity', + 'length', + 'vol_true', + 'vol_tot', + 'veh_current', + 'geometry', + ] + ].copy() + # new_edges_df = new_edges_df.join(edge_volume, how='left') + # new_edges_df['vol_ss'] = new_edges_df['vol_ss'].fillna(0) + # new_edges_df['vol_true'] += new_edges_df['vol_ss'] + new_edges_df['vol_true'] = new_edges_df.index.map(edge_quarter_vol) + new_edges_df['veh_current'] = new_edges_df.index.map( + edge_current_vehicles + ) + # new_edges_df['vol_tot'] += new_edges_df['vol_ss'] + new_edges_df['flow'] = ( + new_edges_df['vol_true'] * quarter_demand / assigned_demand + ) * quarter_counts + new_edges_df['t_avg'] = new_edges_df['fft'] * ( + 1 + + alpha_f + * (new_edges_df['flow'] / new_edges_df['capacity']) ** beta_f + ) + new_edges_df['t_avg'] = np.where( + new_edges_df['t_avg'] > 36000, 36000, new_edges_df['t_avg'] 
# noqa: PLR2004 + ) + new_edges_df['t_avg'] = new_edges_df['t_avg'].round(2) + return new_edges_df, od_residual_ss_list, trip_info, agents_path + def write_edge_vol( + self, + edges_df=None, + simulation_outputs=None, + quarter=None, + hour=None, + scen_nm=None, + ): + if 'flow' in edges_df.columns: + if edges_df.shape[0] < 10: # noqa: PLR2004 + edges_df[ + [ + 'uniqueid', + 'start_nid', + 'end_nid', + 'capacity', + 'veh_current', + 'vol_true', + 'vol_tot', + 'flow', + 't_avg', + 'geometry', + ] + ].to_csv( + f'{simulation_outputs}/edge_vol/edge_vol_hr{hour}_' + f'qt{quarter}_{scen_nm}.csv', + index=False, + ) + else: + edges_df.loc[ + edges_df['vol_true'] > 0, + [ + 'uniqueid', + 'start_nid', + 'end_nid', + 'capacity', + 'veh_current', + 'vol_true', + 'vol_tot', + 'flow', + 't_avg', + 'geometry', + ], + ].to_csv( + f'{simulation_outputs}/edge_vol/edge_vol_hr{hour}_' + f'qt{quarter}_{scen_nm}.csv', + index=False, + ) + def write_final_vol( + self, + edges_df=None, + simulation_outputs=None, + quarter=None, + hour=None, + scen_nm=None, + ): + edges_df.loc[ + edges_df['vol_tot'] > 0, + ['uniqueid', 'start_nid', 'end_nid', 'vol_tot', 'geometry'], + ].to_csv( + simulation_outputs / 'edge_vol' / f'final_edge_vol_hr{hour}_qt' + f'{quarter}_{scen_nm}.csv', + index=False, + ) + def assignment( # noqa: C901, PLR0913 + self, + quarter_counts=6, + substep_counts=15, + substep_size=30000, + edges_df=None, + nodes_df=None, + od_all=None, + simulation_outputs=None, + scen_nm=None, + hour_list=None, + quarter_list=None, + cost_factor=None, # noqa: ARG001 + closure_hours=None, + closed_links=None, + agent_time_limit=None, + sample_interval=1, + agents_path=None, + alpha_f=0.3, + beta_f=4, + two_way_edges=False, # noqa: FBT002 + save_edge_vol=True, + ): + if closure_hours is None: + closure_hours = [] + + # Check if all od pairs has path. 
If not, drop those pairs here and report them later as trips with no path. + orig = od_all['origin_nid'].to_numpy() + dest = od_all['destin_nid'].to_numpy() + open_edges_df = edges_df[ + ~edges_df['uniqueid'].isin(closed_links['uniqueid'].to_numpy()) + ] + net = pdna.Network( + nodes_df['x'], + nodes_df['y'], + open_edges_df['start_nid'], + open_edges_df['end_nid'], + open_edges_df[['fft']], + twoway=two_way_edges, + ) + net.set(pd.Series(net.node_ids)) + paths = net.shortest_paths(orig, dest) + no_path_ind = [i for i in range(len(paths)) if len(paths[i]) == 0] + od_no_path = od_all.iloc[no_path_ind].copy() + od_all = od_all.drop(no_path_ind) + + od_all['current_nid'] = od_all['origin_nid'] + trip_info = { + (od.agent_id, od.origin_nid, od.destin_nid): [ + 0, + 0, + od.origin_nid, + 0, + od.hour, + od.quarter, + 0, + 0, + ] + for od in od_all.itertuples() + } + # Quarters and substeps + # probability of being in each division of hour + if quarter_list is None: + quarter_counts = 4 + else: + quarter_counts = len(quarter_list) + quarter_ps = [1 / quarter_counts for i in range(quarter_counts)] + quarter_ids = list(range(quarter_counts)) + + # initial setup + od_residual_list = [] + # accumulator + edges_df['vol_tot'] = 0 + edges_df['veh_current'] = 0 + + # Loop through days and hours + for _day in ['na']: + for hour in hour_list: + gc.collect() + if hour in closure_hours: + for row in closed_links.itertuples(): + edges_df.loc[ + (edges_df['uniqueid'] == row.uniqueid), 'capacity' + ] = 1 + edges_df.loc[ + (edges_df['uniqueid'] == row.uniqueid), 'fft' + ] = 36000 else: + edges_df['capacity'] = edges_df['normal_capacity'] + edges_df['fft'] = edges_df['normal_fft'] + + # Read OD + od_hour = od_all[od_all['hour'] == hour].copy() + if od_hour.shape[0] == 0: + od_hour = pd.DataFrame([], columns=od_all.columns) + od_hour['current_link'] = None + od_hour['current_link_time'] = 0 + + # Divide into quarters + if 'quarter' in od_all.columns: + pass + else: + od_quarter_msk = np.random.choice( + quarter_ids, size=od_hour.shape[0], p=quarter_ps + ) + od_hour['quarter'] = od_quarter_msk + + if quarter_list is None: + quarter_list = quarter_ids + for quarter in quarter_list: + # New OD in assignment period + od_quarter = od_hour.loc[ + od_hour['quarter'] == quarter, + [ + 'agent_id', + 'origin_nid', + 'destin_nid', + 'current_nid', + 'current_link', + 'current_link_time', + ], + ] + # Add residual OD + od_residual = pd.DataFrame( + od_residual_list, + columns=[ + 'agent_id', + 'origin_nid', + 'destin_nid', + 'current_nid', + 'current_link', + 'current_link_time', + ], + ) + od_residual['quarter'] = quarter + # Total OD in each assignment period is the combination + # of new and residual OD: + od_quarter = pd.concat( + [od_quarter, od_residual], sort=False, ignore_index=True + ) + # Residual OD is no longer residual after it has been + # merged to the quarterly OD: + od_residual_list = [] + od_quarter = od_quarter[ + od_quarter['current_nid'] != od_quarter['destin_nid'] + ] + # total demand for this quarter, including total and + # residual demand: + quarter_demand = od_quarter.shape[0] + # how many among the OD pairs to be assigned in this + # quarter are actually residual from previous quarters + residual_demand = od_residual.shape[0] + assigned_demand = 0 + + substep_counts = max(1, 
(quarter_demand // substep_size) + 1) + logging.info( + f'HR {hour} QT {quarter} has ' + f'{quarter_demand}/{residual_demand} od' + f's/residuals {substep_counts} substeps' + ) + substep_ps = [ + 1 / substep_counts for i in range(substep_counts) + ] + substep_ids = list(range(substep_counts)) + od_substep_msk = np.random.choice( + substep_ids, size=quarter_demand, p=substep_ps + ) + od_quarter['ss_id'] = od_substep_msk + # reset volume at each quarter + edges_df['vol_true'] = 0 - def write_final_vol( - edges_df=None, - simulation_outputs=None, - quarter=None, - hour=None, - scen_nm=None, - ): - edges_df.loc[ - edges_df['vol_tot'] > 0, - ['uniqueid', 'start_nid', 'end_nid', 'vol_tot', 'geometry'], - ].to_csv( - simulation_outputs / 'edge_vol' / f'final_edge_vol_hr{hour}_qt' - f'{quarter}_{scen_nm}.csv', - index=False, - ) - - def assignment( # noqa: C901, PLR0913 - quarter_counts=6, - substep_counts=15, - substep_size=30000, - edges_df=None, - nodes_df=None, - od_all=None, - simulation_outputs=None, - scen_nm=None, - hour_list=None, - quarter_list=None, - cost_factor=None, # noqa: ARG001 - closure_hours=None, - closed_links=None, - agent_time_limit=None, - sample_interval=1, - agents_path=None, - alpha_f=0.3, - beta_f=4, - two_way_edges=False, # noqa: FBT002 - ): - if closure_hours is None: - closure_hours = [] - - # Check if all od pairs has path. If not, - orig = od_all['origin_nid'].to_numpy() - dest = od_all['destin_nid'].to_numpy() - open_edges_df = edges_df[ - ~edges_df['uniqueid'].isin(closed_links['uniqueid'].to_numpy()) - ] - net = pdna.Network( - nodes_df['x'], - nodes_df['y'], - open_edges_df['start_nid'], - open_edges_df['end_nid'], - open_edges_df[['fft']], - twoway=two_way_edges, - ) - paths = net.shortest_paths(orig, dest) - no_path_ind = [i for i in range(len(paths)) if len(paths[i]) == 0] - od_no_path = od_all.iloc[no_path_ind].copy() - od_all = od_all.drop(no_path_ind) - - od_all['current_nid'] = od_all['origin_nid'] - trip_info = { - (od.agent_id, od.origin_nid, od.destin_nid): [ - 0, - 0, - od.origin_nid, - 0, - od.hour, - od.quarter, - 0, - 0, - ] - for od in od_all.itertuples() - } - - # Quarters and substeps - # probability of being in each division of hour - if quarter_list is None: - quarter_counts = 4 - else: - quarter_counts = len(quarter_list) - quarter_ps = [1 / quarter_counts for i in range(quarter_counts)] - quarter_ids = list(range(quarter_counts)) - - # initial setup - od_residual_list = [] - # accumulator - edges_df['vol_tot'] = 0 - edges_df['veh_current'] = 0 - - # Loop through days and hours - for _day in ['na']: - for hour in hour_list: - gc.collect() - if hour in closure_hours: - for row in closed_links.itertuples(): - edges_df.loc[ - (edges_df['uniqueid'] == row.uniqueid), 'capacity' - ] = 1 - edges_df.loc[ - (edges_df['uniqueid'] == row.uniqueid), 'fft' - ] = 36000 - else: - edges_df['capacity'] = edges_df['normal_capacity'] - edges_df['fft'] = edges_df['normal_fft'] - - # Read OD - od_hour = od_all[od_all['hour'] == hour].copy() - if od_hour.shape[0] == 0: - od_hour = pd.DataFrame([], columns=od_all.columns) - od_hour['current_link'] = None - od_hour['current_link_time'] = 0 - - # Divide into quarters - if 'quarter' in od_all.columns: - pass - else: - od_quarter_msk = np.random.choice( - quarter_ids, size=od_hour.shape[0], p=quarter_ps - ) - od_hour['quarter'] = od_quarter_msk - - if quarter_list is None: - quarter_list = quarter_ids - for quarter in quarter_list: - # New OD in assignment period - od_quarter = od_hour.loc[ - od_hour['quarter'] == 
quarter, - [ - 'agent_id', - 'origin_nid', - 'destin_nid', - 'current_nid', - 'current_link', - 'current_link_time', - ], - ] - # Add resudal OD - od_residual = pd.DataFrame( - od_residual_list, - columns=[ - 'agent_id', - 'origin_nid', - 'destin_nid', - 'current_nid', - 'current_link', - 'current_link_time', - ], - ) - od_residual['quarter'] = quarter - # Total OD in each assignment period is the combined - # of new and residual OD: - od_quarter = pd.concat( - [od_quarter, od_residual], sort=False, ignore_index=True - ) - # Residual OD is no longer residual after it has been - # merged to the quarterly OD: - od_residual_list = [] - od_quarter = od_quarter[ - od_quarter['current_nid'] != od_quarter['destin_nid'] - ] - - # total demand for this quarter, including total and - # residual demand: - quarter_demand = od_quarter.shape[0] - # how many among the OD pairs to be assigned in this - # quarter are actually residual from previous quarters - residual_demand = od_residual.shape[0] - assigned_demand = 0 - - substep_counts = max(1, (quarter_demand // substep_size) + 1) + for ss_id in substep_ids: + gc.collect() + time_ss_0 = time.time() logging.info( - f'HR {hour} QT {quarter} has ' - f'{quarter_demand}/{residual_demand} od' - f's/residuals {substep_counts} substeps' + f'Hour: {hour}, Quarter: {quarter}, ' + 'SS ID: {ss_id}' ) - substep_ps = [ - 1 / substep_counts for i in range(substep_counts) - ] - substep_ids = list(range(substep_counts)) - od_substep_msk = np.random.choice( - substep_ids, size=quarter_demand, p=substep_ps - ) - od_quarter['ss_id'] = od_substep_msk - - # reset volume at each quarter - edges_df['vol_true'] = 0 - - for ss_id in substep_ids: - gc.collect() - - time_ss_0 = time.time() - logging.info( - f'Hour: {hour}, Quarter: {quarter}, ' - 'SS ID: {ss_id}' - ) - od_ss = od_quarter[od_quarter['ss_id'] == ss_id] - assigned_demand += od_ss.shape[0] - if assigned_demand == 0: - continue - # calculate weight - weighted_edges_df = edges_df.copy() - # weight by travel distance - # weighted_edges_df['weight'] = edges_df['length'] - # weight by travel time - # weighted_edges_df['weight'] = edges_df['t_avg'] - # weight by travel time - # weighted_edges_df['weight'] = (edges_df['t_avg'] - # - edges_df['fft']) * 0.5 + edges_df['length']*0.1 - # + cost_factor*edges_df['length']*0.1*( - # edges_df['is_highway']) - # 10 yen per 100 m --> 0.1 yen per m - weighted_edges_df['weight'] = edges_df['t_avg'] - # weighted_edges_df['weight'] = np.where( - # weighted_edges_df['weight']<0.1, 0.1, - # weighted_edges_df['weight']) - - # traffic assignment with truncated path - ( - edges_df, - od_residual_ss_list, - trip_info, - agents_path, - ) = substep_assignment( - nodes_df=nodes_df, - weighted_edges_df=weighted_edges_df, - od_ss=od_ss, - quarter_demand=quarter_demand, - assigned_demand=assigned_demand, - quarter_counts=quarter_counts, - trip_info=trip_info, - agent_time_limit=agent_time_limit, - sample_interval=sample_interval, - agents_path=agents_path, - hour=hour, - quarter=quarter, - ss_id=ss_id, - alpha_f=alpha_f, - beta_f=beta_f, - two_way_edges=two_way_edges, - ) - - od_residual_list += od_residual_ss_list - # write_edge_vol(edges_df=edges_df, - # simulation_outputs=simulation_outputs, - # quarter=quarter, - # hour=hour, - # scen_nm='ss{}_{}'.format(ss_id, scen_nm)) - logging.info( - f'HR {hour} QT {quarter} SS {ss_id}' - ' finished, max vol ' - f'{np.max(edges_df["vol_true"])}, ' - f'time {time.time() - time_ss_0}' - ) - - # write quarterly results - edges_df['vol_tot'] += 
edges_df['vol_true'] - if True: # hour >=16 or (hour==15 and quarter==3): - write_edge_vol( - edges_df=edges_df, - simulation_outputs=simulation_outputs, - quarter=quarter, - hour=hour, - scen_nm=scen_nm, - ) - - if hour % 3 == 0: - trip_info_df = pd.DataFrame( - [ - [ - trip_key[0], - trip_key[1], - trip_key[2], - trip_value[0], - trip_value[1], - trip_value[2], - trip_value[3], - trip_value[4], - trip_value[5], - ] - for trip_key, trip_value in trip_info.items() - ], - columns=[ - 'agent_id', - 'origin_nid', - 'destin_nid', - 'travel_time', - 'travel_time_used', - 'stop_nid', - 'stop_hour', - 'stop_quarter', - 'stop_ssid', - ], + od_ss = od_quarter[od_quarter['ss_id'] == ss_id] + assigned_demand += od_ss.shape[0] + if assigned_demand == 0: + continue + # calculate weight + weighted_edges_df = edges_df.copy() + # weight by travel distance + # weighted_edges_df['weight'] = edges_df['length'] + # weight by travel time + # weighted_edges_df['weight'] = edges_df['t_avg'] + # weight by travel time + # weighted_edges_df['weight'] = (edges_df['t_avg'] + # - edges_df['fft']) * 0.5 + edges_df['length']*0.1 + # + cost_factor*edges_df['length']*0.1*( + # edges_df['is_highway']) + # 10 yen per 100 m --> 0.1 yen per m + weighted_edges_df['weight'] = edges_df['t_avg'] + # weighted_edges_df['weight'] = np.where( + # weighted_edges_df['weight']<0.1, 0.1, + # weighted_edges_df['weight']) + # traffic assignment with truncated path + ( + edges_df, + od_residual_ss_list, + trip_info, + agents_path, + ) = self.substep_assignment( + nodes_df=nodes_df, + weighted_edges_df=weighted_edges_df, + od_ss=od_ss, + quarter_demand=quarter_demand, + assigned_demand=assigned_demand, + quarter_counts=quarter_counts, + trip_info=trip_info, + agent_time_limit=agent_time_limit, + sample_interval=sample_interval, + agents_path=agents_path, + hour=hour, + quarter=quarter, + ss_id=ss_id, + alpha_f=alpha_f, + beta_f=beta_f, + two_way_edges=two_way_edges, ) - trip_info_df.to_csv( - simulation_outputs / 'trip_info' - f'/trip_info_{scen_nm}_hr{hour}' - '.csv', - index=False, + od_residual_list += od_residual_ss_list + # write_edge_vol(edges_df=edges_df, + # simulation_outputs=simulation_outputs, + # quarter=quarter, + # hour=hour, + # scen_nm='ss{}_{}'.format(ss_id, scen_nm)) + logging.info( + f'HR {hour} QT {quarter} SS {ss_id}' + ' finished, max vol ' + f'{np.max(edges_df["vol_true"])}, ' + f'time {time.time() - time_ss_0}' ) - # output individual trip travel time and stop location + # write quarterly results + edges_df['vol_tot'] += edges_df['vol_true'] + if save_edge_vol: # hour >=16 or (hour==15 and quarter==3): + self.write_edge_vol( + edges_df=edges_df, + simulation_outputs=simulation_outputs, + quarter=quarter, + hour=hour, + scen_nm=scen_nm, + ) - trip_info_df = pd.DataFrame( + trip_info_df = pd.DataFrame( + [ [ - [ - trip_key[0], - trip_key[1], - trip_key[2], - trip_value[0], - trip_value[1], - trip_value[2], - trip_value[3], - trip_value[4], - trip_value[5], - ] - for trip_key, trip_value in trip_info.items() - ], - columns=[ - 'agent_id', - 'origin_nid', - 'destin_nid', - 'travel_time', - 'travel_time_used', - 'stop_nid', - 'stop_hour', - 'stop_quarter', - 'stop_ssid', - ], - ) - # Add the no path OD to the trip info - trip_info_no_path = od_no_path.drop( - columns=[ - col - for col in od_no_path.columns - if col not in ['agent_id', 'origin_nid', 'destin_nid'] + trip_key[0], + trip_key[1], + trip_key[2], + trip_value[0], + trip_value[1], + trip_value[2], + trip_value[3], + trip_value[4], + trip_value[5], ] - ) - 
@@ -1051,7 +1038,7 @@ def assignment(  # noqa: C901, PLR0913
         t_od_1 = time.time()
         logging.info('%d sec to read %d OD pairs', t_od_1 - t_od_0, od_all.shape[0])
         # run residual_demand_assignment
-        assignment(
+        self.assignment(
             edges_df=edges_df,
             nodes_df=nodes_df,
             od_all=od_all,
@@ -1251,6 +1238,110 @@ def update_nodes_file(nodes, det):
     )
     od_df.to_csv('updated_od.csv')
     return od_df
+
+
+class pyrecodes_residual_demand(TransportationPerformance):
+    def __init__(
+        self,
+        edges_file,
+        nodes_file,
+        od_pre_file,
+        hour_list,
+        results_dir,
+        two_way_edges=False
+    ):
+        # Prepare edges and nodes files
+        edges_gdf = gpd.read_file(edges_file).to_crs(epsg=6500)
+        if 'length' not in edges_gdf.columns:
+            edges_gdf['length'] = edges_gdf['geometry'].apply(lambda x: x.length)
+        edges_gdf = edges_gdf.to_crs(epsg=4326)
+        if two_way_edges:
+            edges_gdf_copy = edges_gdf.copy()
+            edges_gdf_copy['StartNode'] = edges_gdf['EndNode']
+            edges_gdf_copy['EndNode'] = edges_gdf['StartNode']
+            edges_gdf = pd.concat([edges_gdf, edges_gdf_copy], ignore_index=True)
+        edges_gdf = edges_gdf.reset_index()
+        edges_gdf = edges_gdf.rename(
+            columns={
+                'index': 'uniqueid',
+                'StartNode': 'start_nid',
+                'EndNode': 'end_nid',
+                'NumOfLanes': 'lanes',
+                'MaxMPH': 'maxspeed',
+            }
+        )
+        # Assume that the capacity of each lane is 1800 vehicles per hour
+        edges_gdf['capacity'] = edges_gdf['lanes'] * 1800
+        edges_gdf['normal_capacity'] = edges_gdf['capacity']
+        edges_gdf['normal_maxspeed'] = edges_gdf['maxspeed']
+        edges_gdf['start_nid'] = edges_gdf['start_nid'].astype(int)
+        edges_gdf['end_nid'] = edges_gdf['end_nid'].astype(int)
+
+        nodes_gdf = gpd.read_file(nodes_file)
+        nodes_gdf = nodes_gdf.rename(columns={'nodeID': 'node_id'})
+        nodes_gdf['y'] = nodes_gdf['geometry'].apply(lambda x: x.y)
+        nodes_gdf['x'] = nodes_gdf['geometry'].apply(lambda x: x.x)
+
+        edges_gdf['fft'] = edges_gdf['length'] / edges_gdf['maxspeed'] * 2.23694
+        edges_gdf['normal_fft'] = (
+            edges_gdf['length'] / edges_gdf['normal_maxspeed'] * 2.23694
+        )
+        edges_gdf = edges_gdf.sort_values(by='fft', ascending=False).drop_duplicates(
+            subset=['start_nid', 'end_nid'], keep='first'
+        )
+        edges_gdf['edge_str'] = (
+            edges_gdf['start_nid'].astype('str')
+            + '-'
+            + edges_gdf['end_nid'].astype('str')
+        )
+        edges_gdf['t_avg'] = edges_gdf['fft']
+        edges_gdf['u'] = edges_gdf['start_nid']
+        edges_gdf['v'] = edges_gdf['end_nid']
+        edges_gdf = edges_gdf.set_index('edge_str')
+
+        # Dropping the geometry columns would save memory; however, the traffic assignment would need to be updated accordingly
+        # edges_gdf = edges_gdf.drop(columns=['geometry'])
+        # nodes_gdf = nodes_gdf.drop(columns=['geometry'])
+
+
+        self.edges_df = edges_gdf
+        self.nodes_df = nodes_gdf
+
+        self.od_pre_file = od_pre_file
+        self.hour_list = hour_list
+        self.results_dir = results_dir
+        self.two_way_edges = two_way_edges
+        # If needed, run on the undamaged network here
+    def simulate(
+        self,
+        r2d_dict
+    ):
+        def update_edges(edges, r2d_dict):
+            return edges
+
+        od_matrix = self.compute_od(r2d_dict)
+
+        edges_df = update_edges(self.edges_df, r2d_dict)
+
+        # Closed links are simulated as normal links with a very high fft
+        closed_links = pd.DataFrame([], columns=['uniqueid'])
+        trip_info_df = self.assignment(
+            edges_df=edges_df,
+            nodes_df=self.nodes_df,
+            od_all=od_matrix,
+            simulation_outputs=self.results_dir,
+            hour_list=self.hour_list,
+            quarter_list=[0, 1, 2, 3, 4, 5],
+            closure_hours=self.hour_list,
+            closed_links=closed_links,
+            two_way_edges=self.two_way_edges,
+            save_edge_vol=False
+        )
+        return trip_info_df
+
+    def compute_od(self, r2d_dict):
+        od_matrix = pd.read_csv(self.od_pre_file)
+        return od_matrix
 
 # def get_graph_network(self,
 #     det_file_path: str,
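A minimal usage sketch of the pyrecodes_residual_demand class added above. The file names, hour list, and the empty r2d_dict are hypothetical; the constructor only assumes that the edge layer carries StartNode, EndNode, NumOfLanes, and MaxMPH and that the node layer carries nodeID, as in __init__ above, and the current update_edges stub leaves the network undamaged regardless of what r2d_dict contains.

from pathlib import Path

# Hypothetical input files; replace with the actual network and OD data.
model = pyrecodes_residual_demand(
    edges_file='roadway_edges.geojson',
    nodes_file='roadway_nodes.geojson',
    od_pre_file='od_pre_disaster.csv',
    hour_list=[7, 8, 9],
    results_dir=Path('residual_demand_results'),
    two_way_edges=True,
)

# r2d_dict would normally carry damage/recovery states from R2D/pyrecodes;
# the placeholder update_edges defined in simulate() currently ignores it.
trip_info = model.simulate(r2d_dict={})
print(trip_info[['agent_id', 'travel_time', 'travel_time_used']].head())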
diff --git a/modules/tools/BRAILS/runBrailsTransp.py b/modules/tools/BRAILS/runBrailsTransp.py
index b7c3cf418..2c3a60601 100644
--- a/modules/tools/BRAILS/runBrailsTransp.py
+++ b/modules/tools/BRAILS/runBrailsTransp.py
@@ -72,6 +72,7 @@ def runBrails(  # noqa: N802, D103
     minimumHAZUS,  # noqa: N803
     maxRoadLength,  # noqa: N803
     lengthUnit,  # noqa: N803
+    saveTrafficSimulationAttr,  # noqa: N803
 ):
     # Initialize TranspInventoryGenerator:
     invGenerator = TranspInventoryGenerator(  # noqa: N806
@@ -83,7 +84,8 @@ def runBrails(  # noqa: N802, D103

     # Combine and format the generated inventory to SimCenter transportation network inventory json format
     invGenerator.combineAndFormat_HWY(
-        minimumHAZUS=minimumHAZUS, maxRoadLength=maxRoadLength, lengthUnit=lengthUnit
+        minimumHAZUS=minimumHAZUS, maxRoadLength=maxRoadLength, lengthUnit=lengthUnit,
+        connectivity=saveTrafficSimulationAttr
     )


@@ -99,6 +101,7 @@ def main(args):  # noqa: D103
     )
     parser.add_argument('--maxRoadLength', default=100, type=float)
     parser.add_argument('--lengthUnit', default='m', type=str)
+    parser.add_argument('--saveTrafficSimulationAttr', default=False, type=str2bool, nargs='?', const=True)

     args = parser.parse_args(args)

@@ -115,6 +118,7 @@ def main(args):  # noqa: D103
         args.minimumHAZUS,
         args.maxRoadLength,
         args.lengthUnit,
+        args.saveTrafficSimulationAttr,
     )

     log_msg('BRAILS successfully generated the requested transportation inventory')
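The new --saveTrafficSimulationAttr flag is parsed with a str2bool converter together with nargs='?' and const=True, so it can be passed with or without an explicit value. The converter below is a typical sketch of that pattern; the str2bool already defined in runBrailsTransp.py (not shown in this patch) may differ in detail.

import argparse


def str2bool(v):
    # Accept common textual spellings of booleans on the command line.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')


parser = argparse.ArgumentParser()
parser.add_argument(
    '--saveTrafficSimulationAttr', default=False, type=str2bool, nargs='?', const=True
)
# Each of these invocations enables the traffic-simulation attributes:
#   --saveTrafficSimulationAttr
#   --saveTrafficSimulationAttr True
#   --saveTrafficSimulationAttr yes
print(parser.parse_args(['--saveTrafficSimulationAttr']))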