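"""Helper routines for the Edge-IIoTset DNN dataset.

Covers dependency checking/installation, Kaggle API token validation,
downloading the raw CSV from Kaggle, and preprocessing it (column dropping,
NaN/duplicate removal, shuffling, and dummy encoding of categorical features).
"""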
import os
import subprocess
import pkg_resources
def library_check():
    installed = {pkg.key for pkg in pkg_resources.working_set}
    # Check if the kaggle package is installed
    if 'kaggle' in installed:
        print("The Kaggle package is already installed.")
    else:
        print("The Kaggle package is not installed. Initiating installation...")
        # Install the kaggle package
        subprocess.run(["conda", "install", "-c", "conda-forge", "kaggle"])
    # Check if both the cuml and cudf packages are installed
    if 'cuml' in installed and 'cudf' in installed:
        print("The cuml and cudf packages are already installed.")
    else:
        print("The cuml and/or cudf package is not installed. Initiating installation...")
        # Install the cuml and cudf packages
        subprocess.run(["conda", "install", "--solver=libmamba", "-c", "conda-forge", "cuml", "cudf"])
    # Check the remaining dependencies and install any that are missing
    for package in ["matplotlib", "seaborn", "pandas", "numpy", "scikit-learn", "tensorflow"]:
        if package in installed:
            print(f"The {package} package is already installed.")
        else:
            print(f"The {package} package is not installed. Initiating installation...")
            subprocess.run(["conda", "install", "-c", "conda-forge", package])
def kaggle_api_json():
    # Define the path to the kaggle.json file
    kaggle_json_path = os.path.expanduser("~/.kaggle/kaggle.json")
    # Check if the kaggle.json file exists
    if os.path.exists(kaggle_json_path):
        print("The API JSON file is found.")
        # Set appropriate permissions for kaggle.json
        subprocess.run(["chmod", "600", kaggle_json_path])
        return True
    else:
        print("The kaggle.json file is not found in the directory.")
        print("Please read the instructions on how to obtain it from the following link: https://www.kaggle.com/docs/api")
        return False
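
# For reference (token layout assumed, see https://www.kaggle.com/docs/api): kaggle.json is a
# small JSON file of the form {"username": "<kaggle-user>", "key": "<api-key>"} and must be
# placed at ~/.kaggle/kaggle.json for kaggle_api_json() to return True.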
def download_raw_csv():
    # Download the dataset from Kaggle
    subprocess.run(["kaggle", "datasets", "download", "-d", "mohamedamineferrag/edgeiiotset-cyber-security-dataset-of-iot-iiot", "-f", "Edge-IIoTset dataset/Selected dataset for ML and DL/DNN-EdgeIIoT-dataset.csv"])
    # Unzip the downloaded dataset into the CSV directory
    subprocess.run(["unzip", "DNN-EdgeIIoT-dataset.csv.zip", "-d", "CSV"])
    # Remove the zip file
    os.remove("DNN-EdgeIIoT-dataset.csv.zip")
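
# After download_raw_csv() runs, the extracted file is expected at
# CSV/DNN-EdgeIIoT-dataset.csv, which is the path preprocessing() reads below.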
def preprocessing():
    # OBJECTIVE: Dropping data (columns, duplicated rows, NaN/null values)
    import pandas as pd
    import numpy as np
    from sklearn.utils import shuffle
    from sklearn.model_selection import train_test_split
    # Reading the dataset's CSV file into a pandas DataFrame
    df = pd.read_csv('CSV/DNN-EdgeIIoT-dataset.csv', low_memory=False)
    # Optional: stratified sampling based on the 'Attack_label' column
    #df_train, df_sliced = train_test_split(df, test_size=1/3, stratify=df['Attack_label'], random_state=42)
    # Columns to drop
    drop_columns = ["frame.time", "ip.src_host", "ip.dst_host", "arp.src.proto_ipv4", "arp.dst.proto_ipv4",
                    "http.file_data", "http.request.full_uri", "icmp.transmit_timestamp",
                    "http.request.uri.query", "tcp.options", "tcp.payload", "tcp.srcport",
                    "tcp.dstport", "udp.port", "mqtt.msg"]
    # Dropping unnecessary columns
    #df_sliced.drop(drop_columns, axis=1, inplace=True)
    #df_train.drop(drop_columns, axis=1, inplace=True)
    df.drop(drop_columns, axis=1, inplace=True)
    # Dropping NaN values
    #df_sliced.dropna(axis=0, how='any', inplace=True)
    #df_train.dropna(axis=0, how='any', inplace=True)
    df.dropna(axis=0, how='any', inplace=True)
    # Dropping duplicate rows
    #df_sliced.drop_duplicates(subset=None, keep="first", inplace=True)
    #df_train.drop_duplicates(subset=None, keep="first", inplace=True)
    df.drop_duplicates(subset=None, keep="first", inplace=True)
    # Shuffling the DataFrame
    #df_sliced = shuffle(df_sliced)
    #df_train = shuffle(df_train)
    df = shuffle(df)
    # OBJECTIVE: Categorical data encoding (dummy encoding)
    def encode_text_dummy(df, name):
        dummies = pd.get_dummies(df[name])
        for x in dummies.columns:
            dummy_name = f"{name}-{x}"
            df[dummy_name] = dummies[x]
        df.drop(name, axis=1, inplace=True)
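    # Illustration (values assumed for the example): a column such as 'http.request.method'
    # holding 'GET' and 'POST' would be replaced by two indicator columns,
    # 'http.request.method-GET' and 'http.request.method-POST', via encode_text_dummy().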
    # Encoding categorical features
    categorical_features = ['http.request.method', 'http.referer', 'http.request.version', 'dns.qry.name.len',
                            'mqtt.conack.flags', 'mqtt.protoname', 'mqtt.topic']
    #for feature in categorical_features:
    #    if feature in df_sliced.columns:
    #        encode_text_dummy(df_sliced, feature)
    #for feature in categorical_features:
    #    if feature in df_train.columns:
    #        encode_text_dummy(df_train, feature)
    for feature in categorical_features:
        if feature in df.columns:
            encode_text_dummy(df, feature)
    # Writing the preprocessed dataset back to disk
    #df_sliced.to_csv('test_preprocessed_DNN.csv', encoding='utf-8', index=False)
    #df_train.to_csv('train_preprocessed_DNN.csv', encoding='utf-8', index=False)
    df.to_csv('CSV/preprocessed_DNN.csv', encoding='utf-8', index=False)
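
# A minimal usage sketch (assumed call order; not part of the original pipeline wiring):
# check/install dependencies, verify the Kaggle token, then download and preprocess.
if __name__ == "__main__":
    library_check()
    if kaggle_api_json():
        download_raw_csv()
        preprocessing()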