# NOTE(review): the five lines that originally appeared here ("Professional
# Documents", "Culture Documents", "Gid" x3) were navigation residue from the
# document-sharing site this listing was scraped from, not program text.
# They are commented out so the file can parse.
# Standard library
from datetime import datetime

# Third-party: numerics / data handling
import numpy as np
import pandas as pd

# Third-party: machine learning
import sklearn.metrics
from sklearn import metrics, neighbors, svm, tree
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# FIXME(review): pandas-internal import, unused anywhere in this script —
# almost certainly an editor-autocomplete/paste artifact. Kept for reference;
# safe to remove.
from pandas.core.internals.blocks import F
# Load the four CAN-bus captures (extracted-feature CSVs) from Google Drive.
# Full row counts per capture:
#   Attack-free:   2,369,397
#   DoS:             656,578
#   Fuzzy:           591,989
#   Impersonation:   995,471
# Only the first ~10% of each capture is kept to bound memory and runtime.
_FEATURE_DIR = ('/content/drive/MyDrive/CAN_VehicleAttack_ML/'
                'CAN_vehicleAttack_dataset1/CAN_Attack_dataset1-15F/'
                'CAN_Attack_dataset1_15F/github_folder/extracted feature/'
                'CAN_attack_dataset1/')

AttackFree = pd.read_csv(_FEATURE_DIR + 'Attack_free new.csv')[0:236939]
DoS = pd.read_csv(_FEATURE_DIR + 'DoS_Attack_new.csv')[0:65657]
Fuzzy = pd.read_csv(_FEATURE_DIR + 'Fuzzy_Attack_New.csv')[0:59198]
# NOTE(review): the source listing was truncated mid-statement here; the
# filename and slice below are reconstructed from the pattern of the other
# three loads (10% of 995,471 rows) — confirm against the original notebook.
Impersonation = pd.read_csv(
    _FEATURE_DIR + 'Impersonation_Attack_new.csv')[0:99547]
# Integer class labels, one per retained row of each capture:
#   Class 1: Attack-Free
#   Class 2: DoS
#   Class 3: Fuzzy
#   Class 4: Impersonation
# List multiplication replaces the original O(n) append loops.
label1 = [1] * len(AttackFree)
label2 = [2] * len(DoS)
label3 = [3] * len(Fuzzy)
label4 = [4] * len(Impersonation)

# Combined design matrix and label vector consumed by train_test_split below.
# NOTE(review): the original listing used `Dataset` and `label` without ever
# building them (lost in extraction); reconstructed here as the obvious
# row-wise concatenation in class order, matching the label vector.
Dataset = pd.concat([AttackFree, DoS, Fuzzy, Impersonation],
                    ignore_index=True)
label = label1 + label2 + label3 + label4
# Splitting the dataset into the Training set and Test set: 75/25 split with a
# fixed seed for reproducibility. `train_test_split` comes from
# sklearn.model_selection (see the import block).
X_Train, X_Test, Y_Train, Y_Test = train_test_split(
    Dataset, label, test_size=0.25, random_state=0)

# Feature Scaling: fit the scaler on the training split only, then apply the
# *same* fitted transform to the test split — fitting on the test set would
# leak test statistics into training.
sc_X = StandardScaler()
X_Train = sc_X.fit_transform(X_Train)
X_Test = sc_X.transform(X_Test)
# SVM Classifier — training, with wall-clock timing around fit().
# NOTE(review): the original listing used `classifier_svm` without ever
# constructing it (truncated source). Recreated here in the same shape as the
# DT and KNN sections below; the kernel/hyperparameters are the sklearn
# defaults — confirm against the original notebook.
classifier_svm = svm.SVC(random_state=0)

start_time_1 = datetime.now()
classifier_svm.fit(X_Train, Y_Train)
end_time_1 = datetime.now()
print('Training Duration: {}'.format(end_time_1 - start_time_1))

# SVM Testing — predict the held-out test split, timed separately.
start_time_2 = datetime.now()
# Predicting the test set results
Y_Pred_svm = classifier_svm.predict(X_Test)
end_time_2 = datetime.now()
print('Testing Duration: {}'.format(end_time_2 - start_time_2))
import seaborn as sn
import matplotlib.pyplot as plt

# Confusion-matrix heatmap for the SVM predictions.
# NOTE(review): `font` and `df_cm_svm` were undefined in the extracted source;
# reconstructed here (12pt font, confusion matrix as a DataFrame so seaborn
# labels the axes) — confirm against the original notebook.
font = {'size': 12}
plt.rc('font', **font)
df_cm_svm = pd.DataFrame(confusion_matrix(Y_Test, Y_Pred_svm))
plt.figure(figsize=(7, 5))
sn.heatmap(df_cm_svm, annot=True, fmt='.0f', cmap='Blues')
plt.ylabel('True Class')
plt.xlabel('Predicted Class')
plt.show()

# Scalar metrics for the SVM.
# BUG FIX: the original computed recall and F1 as
# metrics.recall_score(Y_Test, Y_Test, ...) — comparing the ground truth with
# itself, which is identically 1.0. Both must compare against Y_Pred_svm.
print("Accuracy:", metrics.accuracy_score(Y_Test, Y_Pred_svm))
print("Precision:", metrics.precision_score(Y_Test, Y_Pred_svm,
                                            average='micro'))
print("Recall:", metrics.recall_score(Y_Test, Y_Pred_svm, average='micro'))
print("F1_Score:", metrics.f1_score(Y_Test, Y_Pred_svm, average='micro'))
print("Cohen_Kappa_Score:", sklearn.metrics.cohen_kappa_score(
    Y_Test, Y_Pred_svm, labels=None, weights=None, sample_weight=None))
#print("MSE:",metrics.mean_squared_error(Y_Test,Y_Pred_svm))
print(classification_report(Y_Test, Y_Pred_svm))
# DT classifier
# Fitting the classifier into the Training set: Decision Tree (DT) training.
# `tree` comes from sklearn (see the import block); fixed seed for
# reproducibility, unbounded depth.
classifier_dt = tree.DecisionTreeClassifier(criterion='gini', splitter='best',
                                            random_state=0, max_depth=None)

start_time_1 = datetime.now()
classifier_dt.fit(X_Train, Y_Train)
end_time_1 = datetime.now()
print('Training Duration: {}'.format(end_time_1 - start_time_1))

# DT Testing — predict the held-out test split, timed separately.
start_time_2 = datetime.now()
# Predicting the test set results
Y_Pred_dt = classifier_dt.predict(X_Test)
end_time_2 = datetime.now()
print('Testing Duration: {}'.format(end_time_2 - start_time_2))
Y_Pred_dt  # notebook-style echo of the prediction array; a no-op in a script
import seaborn as sn
import matplotlib.pyplot as plt

# Confusion-matrix heatmap for the DT predictions.
# NOTE(review): `font` and `df_cm_dt` were undefined in the extracted source;
# reconstructed here to match the SVM evaluation section — confirm against the
# original notebook.
font = {'size': 12}
plt.rc('font', **font)
df_cm_dt = pd.DataFrame(confusion_matrix(Y_Test, Y_Pred_dt))
plt.figure(figsize=(7, 5))
sn.heatmap(df_cm_dt, annot=True, fmt='.0f', cmap='Blues')
plt.ylabel('True Class')
plt.xlabel('Predicted Class')
plt.show()

# Evaluation for DT.
# BUG FIX: recall and F1 were computed as (Y_Test, Y_Test) — truth vs. itself,
# identically 1.0. Both must compare against Y_Pred_dt.
print("Accuracy:", metrics.accuracy_score(Y_Test, Y_Pred_dt))
print("Precision:", metrics.precision_score(Y_Test, Y_Pred_dt,
                                            average='micro'))
print("Recall:", metrics.recall_score(Y_Test, Y_Pred_dt, average='micro'))
print("F1_Score:", metrics.f1_score(Y_Test, Y_Pred_dt, average='micro'))
print("Cohen_Kappa_Score:", sklearn.metrics.cohen_kappa_score(
    Y_Test, Y_Pred_dt, labels=None, weights=None, sample_weight=None))
#print("MSE:",metrics.mean_squared_error(Y_Test,Y_Pred_dt))
# Individual Classification - DT
print(classification_report(Y_Test, Y_Pred_dt))
# KNN classifier
# Fitting the classifier into the Training set: K-Nearest Neighbour training.
# (The continuation lines of the two comments below had lost their leading '#'
# in the extracted source, which made the file syntactically invalid.)
# Signature reference: KNeighborsClassifier(n_neighbors=5, *, weights='uniform',
#   algorithm='auto', leaf_size=30, p=2, metric='minkowski',
#   metric_params=None, n_jobs=None)
# Candidate metrics: Manhattan, Minkowski, Euclidean, Cosine, Jaccard, Hamming.
#n_neighbors = 20
classifier_knn = neighbors.KNeighborsClassifier(
    n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2,
    metric='minkowski', metric_params=None, n_jobs=None)

start_time_1 = datetime.now()
classifier_knn.fit(X_Train, Y_Train)
end_time_1 = datetime.now()
print('Training Duration: {}'.format(end_time_1 - start_time_1))

# KNN Testing — predict the held-out test split, timed separately.
start_time_2 = datetime.now()
# Predicting the test set results
Y_Pred_knn = classifier_knn.predict(X_Test)
end_time_2 = datetime.now()
print('Testing Duration: {}'.format(end_time_2 - start_time_2))

import seaborn as sn
import matplotlib.pyplot as plt

# Confusion-matrix heatmap for KNN.
# NOTE(review): `font` and `df_cm_knn` were undefined in the extracted source;
# reconstructed here to match the SVM/DT evaluation sections — confirm against
# the original notebook.
font = {'size': 12}
plt.rc('font', **font)
df_cm_knn = pd.DataFrame(confusion_matrix(Y_Test, Y_Pred_knn))
plt.figure(figsize=(7, 5))
sn.heatmap(df_cm_knn, annot=True, fmt='.0f', cmap='Blues')
plt.ylabel('True Class')
plt.xlabel('Predicted Class')
plt.show()
print(classification_report(Y_Test, Y_Pred_knn))