ML Final-1


Find-S

import pandas as pd
import numpy as np

data = pd.read_csv("ENJOYSPORT.csv")

# Attribute columns and the target column
arr = np.array(data)[:, :-1]
target = np.array(data)[:, -1]

def train(c, t):
    # Start from the first positive example as the most specific hypothesis
    for i, val in enumerate(t):
        if val == 1:
            sp_hy = c[i].copy()
            break
    # Generalise the hypothesis over every positive example
    for i, val in enumerate(c):
        if t[i] == 1:
            for x in range(len(sp_hy)):
                if val[x] != sp_hy[x]:
                    sp_hy[x] = '?'
    return sp_hy

print(train(arr, target))
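
The learned hypothesis can also be used to classify new instances: an instance is positive when every attribute either matches the hypothesis value or the hypothesis holds a '?'. A minimal sketch (the helper name is illustrative, not part of the original listing):

def matches(hypothesis, instance):
    # True when the instance is consistent with the specific hypothesis
    return all(h == '?' or h == v for h, v in zip(hypothesis, instance))

hypothesis = train(arr, target)
print(matches(hypothesis, arr[0]))  # check the first training example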

Candidate Elimination

import numpy as np
import pandas as pd

data = pd.read_csv("ENJOYSPORT (1).csv")
x = np.array(data.iloc[:, 0:-1])
target = np.array(data.iloc[:, -1])

def learn(concepts, target):
    # Initialise the specific boundary with the first training instance
    specific_h = concepts[0].copy()
    print("Initial Specific Boundary: ", specific_h)
    # Initialise the general boundary with the most general hypotheses
    general_h = [["?" for i in range(len(specific_h))] for i in range(len(specific_h))]
    print("\nInitial Generic Boundary: ", general_h)

    for i, h in enumerate(concepts):
        print("\nInstance", i + 1, "is ", h)
        if target[i] == 1:
            print("Instance is Positive")
            # Generalise specific_h and drop contradicted general hypotheses
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    specific_h[x] = '?'
                    general_h[x][x] = '?'
        if target[i] == 0:
            print("Instance is Negative")
            # Specialise general_h against the negative instance
            for x in range(len(specific_h)):
                if h[x] != specific_h[x]:
                    general_h[x][x] = specific_h[x]
                else:
                    general_h[x][x] = '?'
        print("Specific Boundary after ", i + 1, "Instance is ", specific_h)
        print("Generic Boundary after ", i + 1, "Instance is ", general_h)
        print("\n")

    # Remove rows of the general boundary that stayed maximally general
    # (six '?' entries, one per EnjoySport attribute)
    indices = [i for i, val in enumerate(general_h) if val == ['?', '?', '?', '?', '?', '?']]
    for i in indices:
        general_h.remove(['?', '?', '?', '?', '?', '?'])
    return specific_h, general_h

s_final, g_final = learn(x, target)
print("Final Specific_h: ", s_final, sep="\n")
print("Final General_h: ", g_final, sep="\n")

Simple Linear Regression

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

data = pd.read_csv("Salary.csv")
x = data.iloc[:, :-1]
y = data.iloc[:, -1]

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=0)
print(np.size(x_train))
print(np.size(x_test))
print(np.size(y_train))
print(np.size(y_test))

from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(x_train, y_train)

# Plot the training points and the fitted regression line
plt.scatter(x_train, y_train, color='red')
plt.plot(x_train, reg.predict(x_train), color='blue')
plt.title('Salary vs Experience')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
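
As a quick follow-up (not in the original listing), the fit can also be checked on the held-out test split:

from sklearn.metrics import mean_squared_error
y_pred = reg.predict(x_test)  # predictions on the unseen test data
print("Test MSE:", mean_squared_error(y_test, y_pred))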

Multiple Regression

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

data = pd.read_csv("USA_Housing (1).csv")
x = data.drop(['Price', 'Address'], axis="columns")
y = data["Price"]

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=0)

reg = LinearRegression()
reg.fit(x_train, y_train)
y_pred = reg.predict(x_test)

print("Mean Squared Error:", mean_squared_error(y_test, y_pred))
print("Coefficients:", reg.coef_)
print("Intercept:", reg.intercept_)

Decision Tree

import pandas as pd
import numpy as np

data = pd.read_csv("play_tennis (1).csv")

# One-hot encode each categorical column
outlook = pd.get_dummies(data["outlook"])
humidity = pd.get_dummies(data["humidity"])
wind = pd.get_dummies(data["wind"])
temperature = pd.get_dummies(data["temperature"])
play = pd.get_dummies(data["play"])

# The original categorical columns are no longer needed
data.drop(["outlook", "temperature", "humidity", "wind", "play"], axis="columns", inplace=True)
d = pd.concat([outlook, temperature, humidity, wind, play], axis=1)

x = d.drop(["yes", "no"], axis="columns")
y = d['yes']

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# The target is binary (play: yes/no), so a classifier is used here
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
y_predict = model.predict(x_test)
print(y_predict)

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
print("Accuracy:", accuracy_score(y_test, y_predict))
print(classification_report(y_test, y_predict))
print(confusion_matrix(y_test, y_predict))
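
Optionally, the fitted tree can be drawn with scikit-learn's plot_tree; this visualisation step is an addition, not part of the original listing:

import matplotlib.pyplot as plt
from sklearn.tree import plot_tree

# Draw the decision tree with the one-hot feature names
plt.figure(figsize=(12, 6))
plot_tree(model, feature_names=list(x.columns), class_names=["no", "yes"], filled=True)
plt.show()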

Perceptron

import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

cancer_data = load_breast_cancer()
X, y = cancer_data.data, cancer_data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardise the features before training the network
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Note: this trains a multi-layer perceptron with two hidden layers
mlp = MLPClassifier(hidden_layer_sizes=(64, 32), max_iter=1000, random_state=42)
mlp.fit(X_train, y_train)
y_pred = mlp.predict(X_test)

accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")

class_report = classification_report(y_test, y_pred)
print("Classification Report:\n", class_report)

cf = confusion_matrix(y_test, y_pred)
sns.heatmap(cf, annot=True)
plt.xlabel("y_pred")
plt.ylabel("y_test")
plt.show()

MLP

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

data = pd.read_csv("Iris.csv")
data.head()
data.describe()
data.info()
data.tail()

x = data[::-1]   # reversed copy of the data (overwritten just below)
x.head()
x = data.drop(["Species"], axis="columns")
x.head()
y = data["Species"]
y.head()

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Scale the features using statistics computed on the training split only
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(10, 10, 10), max_iter=1000)
mlp.fit(x_train, y_train.values.ravel())

# Optional label encoding of the class names; MLPClassifier already accepts
# string labels, so the encoded array is not used further below
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
y_encoded = le.fit_transform(y_train.values.ravel())

Y_predict = mlp.predict(x_test)
print(Y_predict)

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
cm = confusion_matrix(y_test, Y_predict)
print("Accuracy : ", accuracy_score(y_test, Y_predict))
report = classification_report(y_test, Y_predict)
print(report)
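
A small optional check (not in the original listing): MLPClassifier records the training loss per iteration in loss_curve_, which can be plotted to verify convergence:

# Training loss per iteration, recorded during mlp.fit
plt.plot(mlp.loss_curve_)
plt.xlabel("Iteration")
plt.ylabel("Training loss")
plt.show()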

KNN-Regression

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from math import sqrt

data = pd.read_csv("Salary.csv")
data.head(2)

x = data.drop(["Salary"], axis="columns")
y = data["Salary"]

# Note the unpacking order: train_test_split returns x_train, x_test, y_train, y_test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

from sklearn import neighbors
model = neighbors.KNeighborsRegressor(n_neighbors=3)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)

print("ERROR :", sqrt(mean_squared_error(y_test, y_pred)))

KNN-classification

import pandas as pd
import numpy as np
from sklearn.datasets import load_iris

d = load_iris()
x = d.data
y = d.target

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

from sklearn import neighbors
model = neighbors.KNeighborsClassifier(n_neighbors=7)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)

from sklearn.metrics import mean_squared_error, accuracy_score, classification_report, confusion_matrix
from math import sqrt

# RMSE on the integer class labels is only a rough indicator here;
# the classification metrics below are the usual way to judge the model
print("Error:", sqrt(mean_squared_error(y_test, y_pred)))
print("Accuracy: ", accuracy_score(y_test, y_pred))
print("Classification report:", classification_report(y_test, y_pred))
print("Confusion matrix: ", confusion_matrix(y_test, y_pred))

Naïve Bayes

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from sklearn.datasets import fetch_20newsgroups

data = fetch_20newsgroups()
print(data.target_names)

categories = ['talk.religion.misc', 'soc.religion.christian',
              'sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)
print(train.data[5])

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# TF-IDF features feed into a multinomial Naive Bayes classifier
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(train.data, train.target)
labels = model.predict(test.data)

from sklearn.metrics import confusion_matrix
mat = confusion_matrix(test.target, labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
            xticklabels=train.target_names, yticklabels=train.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label')
plt.show()
print(mat)

def predict_category(s, train=train, model=model):
    pred = model.predict([s])
    return train.target_names[pred[0]]

print(predict_category('sending a payload to the ISS'))
print(predict_category('discussing islam vs atheism'))
print(predict_category('determining the screen resolution'))
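
A quick overall score for the pipeline on the test split (an addition, not part of the original listing):

from sklearn.metrics import accuracy_score
print("Test accuracy:", accuracy_score(test.target, labels))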
