Make exposure values and checks case-insensitive
ptormene committed Jul 18, 2023
1 parent 9097b1a commit a052f40
Showing 2 changed files with 23 additions and 17 deletions.
6 changes: 3 additions & 3 deletions openquake/risklib/asset.py
@@ -757,7 +757,7 @@ def check_exposure_for_infr_conn_analysis(df, fname):
     # Log a warning if node weights are present and they are not all '1',
     # because handling weights in the nodes is not implemented yet
     if 'weight' in exposure_columns:  # 'weight' is not mandatory
-        if not (df[df.type == 'node']['weight'] == '1').all():
+        if not (df[df.type.str.lower() == 'node']['weight'] == '1').all():
             logging.warning(
                 f'Node weights different from 1 present in {fname} will'
                 f' be ignored. Handling node weights is not implemented yet.')
@@ -772,8 +772,8 @@ def check_exposure_for_infr_conn_analysis(df, fname):

     # Raise an error if 'purpose' contains at least one 'TAZ' value and at
     # least a value in ['source'. 'demand']
-    purpose_values = list(df['purpose'])
-    if 'TAZ' in purpose_values and ('source' in purpose_values
+    purpose_values = set(df['purpose'].str.lower())
+    if 'taz' in purpose_values and ('source' in purpose_values
                                     or 'demand' in purpose_values):
         raise InvalidFile(
             f'Column "purpose" of {fname} can not contain at the same time'
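For context, a minimal self-contained sketch of the case-insensitive 'purpose' check introduced above; the DataFrame content is illustrative and not taken from the repository:

import pandas as pd

# Hypothetical exposure fragment with mixed-case 'purpose' values
df = pd.DataFrame({'id': ['n1', 'n2', 'n3'],
                   'purpose': ['TAZ', 'Source', 'demand']})

# Lower-case once, then test membership, as the patched check does
purpose_values = set(df['purpose'].str.lower())
if 'taz' in purpose_values and ('source' in purpose_values
                                or 'demand' in purpose_values):
    print('invalid: "TAZ" cannot be mixed with "source"/"demand"')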
34 changes: 20 additions & 14 deletions openquake/risklib/connectivity.py
@@ -61,13 +61,14 @@ def classify_nodes(exposure_df):
     # TAZ is the acronym of "Traffic Analysis Zone"
     # user can write both as well
     TAZ_nodes = exposure_df.loc[
-        exposure_df.purpose.isin(["TAZ", "both"])].index.to_list()
+        exposure_df.purpose.str.lower().isin(["taz", "both"])].index.to_list()

     source_nodes = exposure_df.loc[
-        exposure_df.purpose == "source"].index.to_list()
+        exposure_df.purpose.str.lower() == "source"].index.to_list()
     demand_nodes = exposure_df.loc[
-        exposure_df.purpose == "demand"].index.to_list()
-    eff_nodes = exposure_df.loc[exposure_df.type == "node"].index.to_list()
+        exposure_df.purpose.str.lower() == "demand"].index.to_list()
+    eff_nodes = exposure_df.loc[
+        exposure_df.type.str.lower() == "node"].index.to_list()

     # We should raise an error if the exposure nodes contain at the same time
     # taz/both and demand/supply
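As a rough illustration of the classification logic after this change (a sketch assuming an exposure DataFrame with 'type' and 'purpose' columns; not the openquake code itself):

import pandas as pd

exposure_df = pd.DataFrame({
    'id': ['n1', 'n2', 'n3', 'e1'],
    'type': ['Node', 'NODE', 'node', 'Edge'],
    'purpose': ['TAZ', 'Both', 'source', '']})

# Case-insensitive membership and equality tests
TAZ_nodes = exposure_df.loc[
    exposure_df.purpose.str.lower().isin(["taz", "both"])].index.to_list()
eff_nodes = exposure_df.loc[
    exposure_df.type.str.lower() == "node"].index.to_list()
print(TAZ_nodes, eff_nodes)  # [0, 1] [0, 1, 2]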
@@ -102,20 +102,21 @@ def get_graph_type(exposure_df):
 def create_original_graph(exposure_df, g_type):
     # Create the original graph and add edge and node attributes.
     G_original = nx.from_pandas_edgelist(
-        exposure_df.loc[exposure_df.type == "edge"],
+        exposure_df.loc[exposure_df.type.str.lower() == "edge"],
         source="start_node",
         target="end_node",
         edge_attr=True, create_using=getattr(nx, g_type)()
     )
     # This is done for the cases where there might be a disconnected node with
     # no edges and are not added in the G_original previously
-    for _, row in exposure_df.loc[exposure_df.type == "node"].iterrows():
+    for _, row in exposure_df.loc[
+            exposure_df.type.str.lower() == "node"].iterrows():
         if row["id"] not in G_original.nodes:
             G_original.add_node(row["id"], **row)
     # Adding the attribute of the nodes
     nx.set_node_attributes(
         G_original, exposure_df.loc[
-            exposure_df.type == "node"].to_dict("index")
+            exposure_df.type.str.lower() == "node"].to_dict("index")
     )

     return G_original
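A minimal sketch of building a graph from the case-normalized edge rows (illustrative data; only the 'start_node'/'end_node' column names come from the hunk above):

import pandas as pd
import networkx as nx

exposure_df = pd.DataFrame({
    'id': ['e1', 'e2', 'n1', 'n2', 'n3'],
    'type': ['Edge', 'EDGE', 'node', 'Node', 'NODE'],
    'start_node': ['n1', 'n2', None, None, None],
    'end_node': ['n2', 'n3', None, None, None]})

# Only rows whose 'type' lower-cases to "edge" become graph edges
G = nx.from_pandas_edgelist(
    exposure_df.loc[exposure_df.type.str.lower() == "edge"],
    source="start_node", target="end_node",
    edge_attr=True, create_using=nx.Graph())
print(G.number_of_edges())  # 2: both mixed-case "edge" rows are included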
@@ -279,9 +281,9 @@ def cleanup_graph(G_original, event_damage_df, g_type):
     G = G_original.copy()

     nodes_damage_df = event_damage_df.loc[
-        event_damage_df.type == "node"].droplevel(level=0)
+        event_damage_df.type.str.lower() == "node"].droplevel(level=0)
     edges_damage_df = event_damage_df.loc[
-        event_damage_df.type == "edge"].droplevel(level=0)
+        event_damage_df.type.str.lower() == "edge"].droplevel(level=0)

     # Updating the graph to remove damaged edges and nodes
     nonfunctional_edges_df = edges_damage_df.loc[
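A sketch of the damage-table split, under the assumption (mine, not stated in the diff) that event_damage_df is indexed by (event_id, component id); droplevel(level=0) then leaves an id-only index:

import pandas as pd

# Assumed shape of the per-event damage table
event_damage_df = pd.DataFrame(
    {'type': ['Node', 'edge', 'EDGE'], 'functional': [0, 1, 0]},
    index=pd.MultiIndex.from_tuples(
        [(1, 'n1'), (1, 'e1'), (1, 'e2')], names=['event_id', 'id']))

# Case-insensitive split, then drop the event level to index by id only
edges_damage_df = event_damage_df.loc[
    event_damage_df.type.str.lower() == "edge"].droplevel(level=0)
print(edges_damage_df.index.tolist())  # ['e1', 'e2']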
@@ -418,8 +420,9 @@ def ELWCLPCLCCL_demand(exposure_df, G_original, eff_nodes, demand_nodes,
     # To store the information of the performance indicators at connectivity
     # level
     dem_cl = exposure_df[
-        exposure_df['purpose'] == 'demand'].iloc[:, 0:1]
-    node_el = exposure_df[exposure_df['type'] == 'node'].iloc[:, 0:1]
+        exposure_df['purpose'].str.lower() == 'demand'].iloc[:, 0:1]
+    node_el = exposure_df[
+        exposure_df['type'].str.lower() == 'node'].iloc[:, 0:1]

     ccl_table = pd.DataFrame({'id': demand_nodes})
     pcl_table = pd.DataFrame({'id': demand_nodes})
@@ -563,8 +566,10 @@ def ELWCLPCLloss_TAZ(exposure_df, G_original, TAZ_nodes,

     # To store the information of the performance indicators at connectivity
     # level
-    taz_cl = exposure_df[exposure_df['purpose'] == 'TAZ'].iloc[:, 0:1]
-    node_el = exposure_df[exposure_df['type'] == 'node'].iloc[:, 0:1]
+    taz_cl = exposure_df[
+        exposure_df['purpose'].str.lower() == 'taz'].iloc[:, 0:1]
+    node_el = exposure_df[
+        exposure_df['type'].str.lower() == 'node'].iloc[:, 0:1]

     pcl_table = pd.DataFrame({'id': TAZ_nodes})
     wcl_table = pd.DataFrame({'id': TAZ_nodes})
@@ -687,7 +692,8 @@ def EL_node(exposure_df, G_original, eff_nodes, damage_df, g_type):

     # To store the information of the performance indicators at connectivity
     # level
-    node_el = exposure_df[exposure_df['type'] == 'node'].iloc[:, 0:1]
+    node_el = exposure_df[
+        exposure_df['type'].str.lower() == 'node'].iloc[:, 0:1]

     eff_table = pd.DataFrame({'id': eff_nodes})
     eff_table.set_index("id", inplace=True)
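Finally, the recurring node_el selection can be checked in isolation (illustrative data; .iloc[:, 0:1] keeps only the first column, here 'id', as a one-column DataFrame):

import pandas as pd

exposure_df = pd.DataFrame({'id': ['n1', 'n2', 'n3', 'e1'],
                            'type': ['node', 'Node', 'NODE', 'edge']})

node_el = exposure_df[
    exposure_df['type'].str.lower() == 'node'].iloc[:, 0:1]
print(len(node_el))  # 3: every spelling of "node" is selected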
