-
Notifications
You must be signed in to change notification settings - Fork 1
/
topsis.py
78 lines (62 loc) · 2.67 KB
/
topsis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import pandas as pd
from criterion import Criterion
from normalize import normalize
def compute_distances(df: pd.DataFrame, weights: "dict[str, Criterion]"):
    """
    Return a pair of dataframes with, per criterion column, each solution's
    distance to the ideal (column maximum) and anti-ideal (column minimum).

    Parameters
    ----------
    df : pd.DataFrame
        Normalized decision matrix; every key of ``weights`` must name a
        numeric column of ``df``.
    weights : dict[str, Criterion]
        Mapping of criterion column name -> Criterion. Only the keys are
        used here; the Criterion direction/weight is presumably applied
        earlier by ``normalize`` -- TODO confirm.

    Returns
    -------
    tuple[pd.DataFrame, pd.DataFrame]
        ``(ideal_distances_df, anti_ideal_distances_df)``; non-criterion
        columns (e.g. the solution name) are carried through unchanged.
    """
    ideal_distances_df: pd.DataFrame = df.copy()
    anti_ideal_distances_df: pd.DataFrame = df.copy()
    for criterion in weights.keys():
        column = df[criterion]
        # Vectorized: the original recomputed max()/min() inside an
        # element-wise lambda, making each column O(n^2).
        ideal_distances_df[criterion] = column.max() - column
        anti_ideal_distances_df[criterion] = column - column.min()
    return (ideal_distances_df, anti_ideal_distances_df)
def compute_similarity(
    ideal_distances_df: pd.DataFrame,
    anti_ideal_distances_df: pd.DataFrame,
    weights: "dict[str, Criterion]",
):
    """
    Return a dataframe of TOPSIS similarity coefficients, one row per
    solution, sorted by similarity (ascending).

    For each row the per-criterion distances are summed, and the
    coefficient is ``anti_ideal / (ideal + anti_ideal)`` rounded to 4
    decimals (1.0 = identical to the ideal solution).

    Parameters
    ----------
    ideal_distances_df, anti_ideal_distances_df : pd.DataFrame
        Outputs of ``compute_distances``; their first column is assumed to
        hold the solution name/identifier.
    weights : dict[str, Criterion]
        Only the keys (criterion column names) are used here; the weight
        values are presumably applied by ``normalize`` -- TODO confirm.

    Returns
    -------
    pd.DataFrame
        Columns ``["criterion", "similarity"]`` (the "criterion" column
        actually holds the solution name, keeping the original schema).
        Rows where both totals are 0 yield NaN instead of raising
        ZeroDivisionError as the original row loop did.
    """
    criteria = list(weights.keys())
    # Vectorized row sums replace the original per-cell chained indexing
    # (df.loc[i][criterion]) and the O(n^2) row-by-row .loc growth; this
    # also works for any index, not just a default RangeIndex.
    ideal_totals = ideal_distances_df[criteria].sum(axis=1)
    anti_ideal_totals = anti_ideal_distances_df[criteria].sum(axis=1)
    similarity = (anti_ideal_totals / (ideal_totals + anti_ideal_totals)).round(4)
    names = ideal_distances_df.iloc[:, 0]
    similarity_df = pd.DataFrame({"criterion": names, "similarity": similarity})
    # Order the dataframe by similarity.
    # NOTE(review): ascending=True puts the WORST solution first; kept as-is
    # since downstream consumers may depend on this ordering -- confirm.
    similarity_df.sort_values(by="similarity", ascending=True, inplace=True)
    return similarity_df
if __name__ == "__main__":
input_path = "data/"
output_path = "output/"
initial_solutions: pd.DataFrame = pd.read_csv(
output_path + "preanalysed_solutions_satisfaction.csv"
#output_path + "preanalysed_solutions_dominance.csv"
)
weights: dict = {
"C1": Criterion("minimize", weight=1),
"C2": Criterion("minimize", weight=2),
"C3": Criterion("maximize", weight=4),
"C4": Criterion("minimize", weight=5),
"C5": Criterion("minimize", weight=3),
"C6": Criterion("maximize", weight=5),
"C7": Criterion("maximize", weight=4),
}
normalized_solutions: pd.DataFrame = normalize(initial_solutions, weights)
(ideal_distances, anti_ideal_distances_df) = compute_distances(
normalized_solutions, weights
)
similarity_df: pd.DataFrame = compute_similarity(
ideal_distances, anti_ideal_distances_df, weights
)
similarity_df.to_csv(output_path + "topsis_solutions.csv", index=False)