
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 00:06:38 2021

@author: jaydevraval
"""

#Imports
import pandas as pd
import numpy as np
from sklearn.datasets import make_moons
import matplotlib.pyplot as pyplot
import numdifftools as nd
from sklearn.preprocessing import normalize

#Initialization
trainingsampleSize = 1000
testingsampleSize = 3000
noiseParam = 0.1

trainingData = make_moons(n_samples = trainingsampleSize, noise = noiseParam,
                          random_state = None) # Training Dataset Created
testingData = make_moons(n_samples = testingsampleSize, noise = noiseParam,
                         random_state = None) # Testing Dataset Created
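# Note: make_moons returns a tuple (X, y) -- X is an (n_samples, 2) feature
# array and y holds the 0/1 class labels.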

featureSize = trainingData[0].shape[1]
trainingData = np.column_stack((trainingData[0], trainingData[1])) # Merging Features and Label
testingData = np.column_stack((testingData[0], testingData[1])) # Merging Features and Label

# Preparing Data For d = 0: shift the class-1 moon down by its maximum
# vertical coordinate so the two moons meet at distance d = 0
dist = 0
for i in trainingData:
    if i[2] == 1:
        if i[1] > dist:
            dist = i[1]

for i in trainingData:
    if i[2] == 1:
        i[1] -= dist

dist = 0
for i in testingData:
    if i[2] == 1:
        if i[1] > dist:
            dist = i[1]

for i in testingData:
    if i[2] == 1:
        i[1] -= dist
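# The loops above could also be written as a vectorized shift (a sketch with
# the same effect here, since the class-1 moon extends above 0):
#     mask = trainingData[:, 2] == 1
#     trainingData[mask, 1] -= trainingData[mask, 1].max()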

#Normalize Data
trainingData[:,:-1] = normalize(trainingData[:,:-1])
testingData[:,:-1] = normalize(testingData[:,:-1])
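# Note: sklearn's normalize scales each sample (row) to unit L2 norm by
# default; it is not per-feature standardization.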

epoch = 10
w = np.random.normal(0,1,2)
eta = 0.01
featureSize = trainingData.shape[1] - 1
bias = 0
mse = np.zeros(epoch)
y_prediction_training = np.zeros(trainingsampleSize)
mse_thres = pow(10,-6)
w_thres = pow(10,-8)
J = np.zeros([trainingsampleSize, featureSize])
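# Note: bias is used below only as a fixed activation threshold and is never
# updated; w_thres and J are declared but not referenced afterwards.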

def func(w, x):
    # Linear model output w.x; used below for numerical differentiation
    return np.dot(w, x.T)
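# Note: func is linear in w, so its gradient with respect to w is simply x.
# nd.Gradient computes this numerically; an analytic equivalent would be:
#     gradientVector = x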

for e in range(epoch):

    print("Epoch: {}".format(e))
    np.random.shuffle(trainingData)
    err = []

    for i in range(trainingsampleSize):

        x = trainingData[i][:-1]
        y = trainingData[i][-1]

        summation = 0
        for p in range(featureSize):
            summation = summation + (w[p] * x[p])

        summation = summation if summation > bias else 0

        y_pred = np.sign(summation)

        error = (y - y_pred)

        # Update Weights: the gradient of w.x with respect to w is x, and the
        # step is scaled by the prediction error (delta rule); the error
        # factor was computed but missing from the original update
        gradientVector = nd.Gradient(func)(w, x)
        w = w + (eta * error * gradientVector)

        err.append(pow(y - y_pred, 2))
        y_prediction_training[i] = int(y_pred)

    mse[e] = sum(err)/len(err)

    if mse[e] < mse_thres:
        break

    print("MSE is: ", mse[e])

# Plotting MSE vs Epoch
pyplot.figure()
pyplot.title("Learning Curve of Half Moon")
pyplot.xlabel("Epochs")
pyplot.ylabel("MSE")
pyplot.plot(mse)

# Misclassification in Training
print("============================")
err = 0
for i in range(trainingsampleSize):

    y = trainingData[i][-1]
    y_pred = y_prediction_training[i]

    if abs(y - y_pred) != 0:
        err += 1

print("Misclassified Data Points in Training: {} Out of {}".format(err, trainingsampleSize))
print("Training Accuracy: {}%".format(round(((trainingsampleSize - err)/trainingsampleSize) * 100, 2)))

y_prediction = np.zeros(testingsampleSize)

for n in range(testingsampleSize):

    x = testingData[n][:-1]
    y = testingData[n][-1]

    summation = 0
    for p in range(featureSize):
        summation = summation + (w[p] * x[p])

    summation = summation if summation > bias else 0

    y_pred = np.sign(summation)

    y_prediction[n] = y_pred
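# Optional sketch: visualize the test-set predictions with a scatter plot
#     pyplot.figure()
#     pyplot.scatter(testingData[:, 0], testingData[:, 1], c=y_prediction, s=5)
#     pyplot.title("Half Moon Test Predictions")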

# Misclassification in Testing
err = 0
for i in range(testingsampleSize):

    y = testingData[i][-1]
    y_pred = y_prediction[i]

    if abs(y - y_pred) != 0:
        err += 1

print("Misclassified Data Points in Testing: {} Out of {}".format(err, testingsampleSize))
print("Testing Accuracy: {}%".format(round(((testingsampleSize - err)/testingsampleSize) * 100, 2)))
