Codes


1. SVM
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn import metrics

iris = datasets.load_iris()
print("Features: ", iris.feature_names)
print("Labels: ", iris.target_names)
print("Data Shape:", iris.data.shape)
print("Data samples:", iris.data[:5])
print("Target label:", iris.target)

# 70/30 train-test split
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.3, random_state=109)
clf = svm.SVC(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)

print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, average='weighted'))
print("Recall:",metrics.recall_score(y_test, y_pred, average='weighted'))

2. SVC
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
dataset = pd.DataFrame({
'ID': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'Feature1': [0.5, 0.8, 0.3, 0.2, 0.6, 0.4, 0.7, 0.9, 0.1, 0.5],
'Feature2': [0.1, 0.2, 0.6, 0.9, 0.4, 0.3, 0.8, 0.7, 0.5, 0.6],
'Class': [0, 0, 1, 1, 1, 0, 1, 1, 0, 0]
})
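# separate the target column from the feature columns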
target = dataset["Class"]
features = dataset.drop(["ID", "Class"], axis=1)
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=10)

svc_model = SVC(C=.1, kernel='linear', gamma=1)
svc_model.fit(X_train, y_train)
prediction = svc_model.predict(X_test)

print(svc_model.score(X_train, y_train))
print(svc_model.score(X_test, y_test))

3. NAÏVE BYTES
from sklearn.datasets import load_iris
iris = load_iris()

# store the feature matrix (X) and response vector (y)
X = iris.data
y = iris.target

# splitting X and y into training and testing sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
                                                     random_state=1)

# training the model on the training set
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)

# making predictions on the testing set
y_pred = gnb.predict(X_test)

# comparing actual response values (y_test) with predicted response values (y_pred)
from sklearn import metrics
print("Gaussian Naive Bayes model accuracy(in %):",
metrics.accuracy_score(y_test, y_pred)*100)

4. KMeans (Elbow method)


from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd

x=[10,20,56,85,41,36,2,5,59,6,3,2,4]
y=[5,9,6,3,5,2,5,4,15,25,63,52,87]
data=list(zip(x,y))
inertia=[]

for i in range(1, 13):
    kmeans = KMeans(n_clusters=i)
    kmeans.fit(data)
    inertia.append(kmeans.inertia_)

plt.plot(range(1,13),inertia,marker='o')
plt.title('Elbow method')
plt.xlabel('number of clusters')
plt.ylabel('inertias')
plt.show()
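# refit with the k suggested by the elbow (3 here) and plot the clusters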
kmeans=KMeans(n_clusters=3)
kmeans.fit(data)
plt.scatter(x,y,c=kmeans.labels_)
plt.show()
5. KMeans (Silhouette score)
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.metrics import silhouette_score
import pandas as pd

x=[10,20,56,85,41,36,2,5,59,6,3,2,4]
y=[5,9,6,3,5,2,5,4,15,25,63,52,87]
data=list(zip(x,y))
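# cluster with k=2, then score the clustering with the silhouette coefficient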
kmeans = KMeans(n_clusters=2)
kmeans.fit(data)
plt.scatter(x, y, c=kmeans.labels_)
plt.show()

score = silhouette_score(data, kmeans.labels_, metric='euclidean')
print('silhouette_score: %.3f' % score)

6. PCA
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.datasets import load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

cancer = load_breast_cancer()
cancer.keys()
print(cancer['DESCR'])
df = pd.DataFrame(cancer['data'],columns=cancer['feature_names'])
df.head()

scaler = StandardScaler()
scaler.fit(df)
scaled_data = scaler.transform(df)

pca = PCA(n_components=2)
pca.fit(scaled_data)

x_pca = pca.transform(scaled_data)
scaled_data.shape
x_pca.shape

plt.figure(figsize=(8,6))
plt.scatter(x_pca[:,0],x_pca[:,1],c=cancer['target'],cmap='plasma')
plt.xlabel('First principal component')
plt.ylabel('Second Principal Component')

pca.components_
df_comp = pd.DataFrame(pca.components_, columns=cancer['feature_names'])
plt.figure(figsize=(12,6))
sns.heatmap(df_comp, cmap='plasma')

7. CONFUSION MATRIX
# Import the necessary libraries
import numpy as np
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
actual = np.array(['Rain', 'No Rain', 'Rain', 'No Rain', 'Rain', 'No Rain', 'Rain',
                   'No Rain', 'No Rain', 'No Rain'])
predicted = np.array(['Rain', 'No Rain', 'Rain', 'No Rain', 'Rain', 'Rain', 'Rain',
                      'Rain', 'No Rain', 'No Rain'])
# pass labels explicitly so the matrix order matches the tick labels below
cm = confusion_matrix(actual, predicted, labels=['Rain', 'No Rain'])
sns.heatmap(cm,
            annot=True,
            fmt='g',
            xticklabels=['Rain', 'No Rain'],
            yticklabels=['Rain', 'No Rain'])
plt.ylabel('Actual', fontsize=13)
plt.xlabel('Prediction', fontsize=13)
plt.title('Confusion Matrix', fontsize=17)
plt.show()

8. DBSCAN
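A minimal sketch, assuming the same synthetic 2-D points as the KMeans examples above; the eps and min_samples values are illustrative and would need tuning on real data.

from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt

x = [10, 20, 56, 85, 41, 36, 2, 5, 59, 6, 3, 2, 4]
y = [5, 9, 6, 3, 5, 2, 5, 4, 15, 25, 63, 52, 87]
data = list(zip(x, y))

# eps is the neighbourhood radius, min_samples the minimum points per dense region
# (both values are assumptions for this toy data)
dbscan = DBSCAN(eps=15, min_samples=2)
dbscan.fit(data)

# DBSCAN labels noise points as -1
print('cluster labels:', dbscan.labels_)
plt.scatter(x, y, c=dbscan.labels_)
plt.title('DBSCAN clustering')
plt.show()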
