# Steepest Descent Using Half Moon Dataset
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 00:06:38 2021
@author: jaydevraval
"""
#Imports
import pandas as pd
import numpy as np
from sklearn.datasets import make_moons
import matplotlib.pyplot as pyplot
import numdifftools as nd
from sklearn.preprocessing import normalize
# Initialization: generate the half-moon data and merge features with labels.
trainingsampleSize = 1000
testingsampleSize = 3000
noiseParam = 0.1
# make_moons returns a (features, labels) tuple; this call was missing,
# so trainingData/testingData were used before being defined.
trainingData = make_moons(n_samples=trainingsampleSize, noise=noiseParam)
testingData = make_moons(n_samples=testingsampleSize, noise=noiseParam)
featureSize = trainingData[0].shape[1]  # number of input features (2)
trainingData = np.column_stack((trainingData[0], trainingData[1]))  # merging features and label
testingData = np.column_stack((testingData[0], testingData[1]))  # merging features and label
# Shift every class-1 point downward by the largest class-1 y-coordinate so
# the two moons become (approximately) linearly separable.
# BUG FIX: `dist` was read before being assigned, and the max-finding loop for
# the training set was missing entirely; reconstructed symmetrically with the
# surviving testing-set version.
dist = 0
for i in trainingData:
    if i[2] == 1:
        if i[1] > dist:
            dist = i[1]
for i in trainingData:
    if i[2] == 1:
        i[1] -= dist
dist = 0
for i in testingData:
    if i[2] == 1:
        if i[1] > dist:
            dist = i[1]
for i in testingData:
    if i[2] == 1:
        i[1] -= dist
# Row-normalize the feature columns (label column untouched).
trainingData[:, :-1] = normalize(trainingData[:, :-1])
testingData[:, :-1] = normalize(testingData[:, :-1])

# --- Hyperparameters and training-state buffers ---
epoch = 10                      # number of passes over the training set
eta = 0.01                      # learning rate
bias = 0
w = np.random.normal(0, 1, 2)   # initial weight vector
featureSize = trainingData.shape[1] - 1  # last column is the label

mse_thres = 1e-6                # convergence threshold on MSE
w_thres = 1e-8                  # convergence threshold on weight change

mse = np.zeros(epoch)           # per-epoch mean squared error
y_prediction_training = np.zeros(trainingsampleSize)
J = np.zeros([trainingsampleSize, featureSize])
# Train with stochastic steepest descent for `epoch` passes over the data.
# BUG FIX: the original called nd.Gradient(func) with `func` undefined
# (NameError), and the computed `error` was never used.  Replaced with the
# analytic LMS steepest-descent update w <- w + eta * (y - y_pred) * x,
# which is the negative gradient of the per-sample squared error.
for e in range(epoch):
    print("Epoch: {}".format(e))
    np.random.shuffle(trainingData)
    err = []
    for i in range(trainingsampleSize):
        x = trainingData[i][:-1]   # feature vector
        y = trainingData[i][-1]    # true label
        summation = 0
        for p in range(featureSize):
            summation = summation + (w[p] * x[p])
        y_pred = np.sign(summation)
        error = (y - y_pred)
        # Steepest-descent (LMS) weight update
        w = w + (eta * error * x)
        err.append(pow(y - y_pred, 2))
        y_prediction_training[i] = int(y_pred)
    mse[e] = sum(err) / len(err)
# Count training-set misclassifications (predictions recorded during the
# final epoch, whose ordering matches trainingData after its last shuffle).
print("============================")
mismatched = trainingData[:, -1] != y_prediction_training
err = int(np.count_nonzero(mismatched))
# Classify every test sample with the trained weight vector.
y_prediction = np.zeros(testingsampleSize)
for n in range(testingsampleSize):
    sample = testingData[n]
    x = sample[:-1]
    y = sample[-1]
    # dot product accumulated term-by-term (same op order as training)
    summation = 0
    for p in range(featureSize):
        summation += w[p] * x[p]
    y_pred = np.sign(summation)
    y_prediction[n] = y_pred
# Count test-set misclassifications by comparing labels with predictions.
err = 0
for i in range(testingsampleSize):
    y = testingData[i][-1]
    y_pred = y_prediction[i]
    if y != y_pred:  # any nonzero difference is a misclassification
        err += 1