Untitled6 - Jupyter Notebook

In [138]:  import numpy


import random
import seaborn as sn
import pandas as pd
# library for plotting arrays
import matplotlib.pyplot as plt
import tensorflow as tf
import os
# scipy.special for the sigmoid function: expit()
import scipy.special
# ensure the plots are inside this notebook, not an external window
%matplotlib inline

In [139]:  scce = tf.keras.losses.SparseCategoricalCrossentropy()

In [140]:  urdu_dataset = numpy.load('C:/Users/HP/Documents/Final/dataset/uhat_dataset.n



images_train = urdu_dataset['x_chars_train'] # all character training images, each 28x28
labels_train = urdu_dataset['y_chars_train'] # training character image labels 0-39
images_test = urdu_dataset['x_chars_test'] # all character testing images, each 28x28
labels_test = urdu_dataset['y_chars_test'] # testing character image labels 0-39

#print('Size of train data for characters: ',images_train.shape)
#print('Size of train labels for characters: ',labels_train.shape)

#image_1 = images_train[5000]
#matplotlib.pyplot.imshow(image_1, cmap = matplotlib.pyplot.get_cmap('binary'))

# Reshaping the image datasets (28,328 training and 4,880 testing images, each
# of dimension 28x28) into rows of dimension 1x784, then joining each row with
# its label as the first column.

reshaped_train = numpy.reshape(images_train,(28328,784))
training_data = numpy.column_stack((labels_train, reshaped_train))
#print(training_data.shape)

reshaped_test = numpy.reshape(images_test,(4880,784))
testing_data = numpy.column_stack((labels_test, reshaped_test))
#print(testing_data.shape)
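
As a quick shape check (a sketch using dummy arrays of the stated dimensions rather than the real dataset), column_stack prepends the label column, giving rows of 1 + 784 = 785 values:

In [ ]:  dummy_images = numpy.zeros((28328, 28, 28))
dummy_labels = numpy.zeros(28328, dtype=int)
dummy_flat = numpy.reshape(dummy_images, (28328, 784))
print(dummy_flat.shape)                                      # (28328, 784)
print(numpy.column_stack((dummy_labels, dummy_flat)).shape)  # (28328, 785)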

In [141]:  # Shuffling the characters order for training and testing


numpy.random.shuffle(training_data)
numpy.random.shuffle(testing_data)
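
numpy.random.shuffle permutes only along the first axis, so each label stays attached to its own pixel row; a small sketch with toy data:

In [ ]:  demo = numpy.column_stack((numpy.arange(5), numpy.arange(5) * 10))
numpy.random.shuffle(demo)
print(demo)  # rows are reordered, but each label still pairs with its own value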


In [142]:  class neuralNetwork:

def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate, ke


self.inodes = inputnodes
self.hnodes = hiddennodes
self.onodes = outputnodes
self.kernel_1 = kernel_1

self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hno


self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.ono
self.lr = learningrate

self.activation_function = lambda x: scipy.special.expit(x)



# helper function to convolve the images before using them
def conv_func(self, inputs_list):
kernel_1 = self.kernel_1
# inputs_list has an image which we will be convolving

# reshape the image into a 28x28 array


inputs_list = inputs_list.reshape((28,28))
conv_image = numpy.ones((26,26))

step = 3
i=0
while i < 25:
i+=1
j = 0
while j < 25:
sub_image = inputs_list[i:(i+step),j:(j+step):]
sub_image = numpy.reshape(sub_image,(1,(step ** 2)))
kernel_1 = numpy.reshape(kernel_1, ((step ** 2),1))
conv_scalar = numpy.dot(sub_image,kernel_1)
conv_image[i,j] = conv_scalar
j+=1
pass
pass

conv_image = numpy.reshape(conv_image, (1,676))

return conv_image

# train the neural network


def train(self, inputs_list, targets_list):

# pre-processing the inputs with convolution


inputs_list = self.conv_func(inputs_list)

# convert inputs list to 2d array


inputs = numpy.array(inputs_list, ndmin=2).T
targets = numpy.array(targets_list, ndmin=2).T

# calculate signals into hidden layer


hidden_inputs = numpy.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)

localhost:8888/notebooks/Downloads/Untitled6.ipynb?kernel_name=python3 2/6
11/24/22, 8:18 PM Untitled6 - Jupyter Notebook

2
# calculate signals into final output layer
final_inputs = numpy.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)

# output layer error is the (target - actual)


output_errors = targets - final_outputs
# hidden layer error is the output_errors, split by weights, recombin
hidden_errors = numpy.dot(self.who.T, output_errors)

# update the weights for the links between the hidden and output laye
self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0

# update the weights for the links between the input and hidden layer
self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.

# query the neural network


def query(self, inputs_list):
# convert inputs list to 2d array
inputs = numpy.array(inputs_list, ndmin=2).T

# calculate signals into hidden layer


hidden_inputs = numpy.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)

# calculate signals into final output layer


final_inputs = numpy.dot(self.who, hidden_outputs)
# calculate the signals emerging from final output layer
final_outputs = self.activation_function(final_inputs)

return final_outputs
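
The loop in conv_func computes a 'valid' 2-D cross-correlation of the image with the 3x3 kernel. A minimal cross-check, assuming scipy.signal is available (it is not imported above):

In [ ]:  from scipy.signal import correlate2d

demo_image = numpy.random.rand(28, 28)
demo_kernel = numpy.array([[0, -1, 0], [-1, 7, -1], [0, -1, 0]], dtype=float)

# correlate2d with mode='valid' visits the same 26x26 grid of offsets
reference = correlate2d(demo_image, demo_kernel, mode='valid')
print(reference.shape)  # (26, 26)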

In [143]:  input_nodes = 676


hidden_nodes = 650
output_nodes = 40

learning_rate = 0.03

# sharpen kernel
kernel_1 = [[0,-1,0],[-1,7,-1],[0,-1,0]]

n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate, kernel_1)
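
For reference, with these settings the network holds two dense weight matrices; a quick check of their shapes and sizes:

In [ ]:  # wih maps the 676 convolved inputs to 650 hidden nodes; who maps the
# 650 hidden nodes to the 40 output classes
print(n.wih.shape, n.who.shape)  # (650, 676) (40, 650)
print(n.wih.size + n.who.size)   # 439400 + 26000 = 465400 weights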


In [144]:  # train the neural network



epochs = 1

# go through the training data set 30 times
for e in range(epochs):

#if epochs >= 15:


#learning_rate = 0.005
#pass

# taking one images data at a time


for r in range(len(training_data)):

# taking all but first element (label). Then, scaling and shifting ea
inputs = ((training_data[r][1:]) / 255.0 * 0.99) + 0.01

# create the target output values (all 0.01, except the desired label
targets = numpy.zeros(output_nodes) + 0.01
# training_data[0] is the target label for this record
targets[training_data[r][0]] = 0.99
n.train(inputs, targets)
pass
pass
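
# Illustrative aside: the target vector built in the loop above is 0.01
# everywhere except 0.99 at the label's index, e.g. for a hypothetical label 5:
#   [0.01, 0.01, 0.01, 0.01, 0.01, 0.99, 0.01, ..., 0.01]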

# test the neural network

# scorecard for how well the network performs, initially an empty list
scorecard = []

# building an empty array to collect the network's output rows
predicted = numpy.empty((0, 40), float)

Output_Matrix = numpy.zeros((40, 40)) # an empty 40x40 matrix to fill with the summed output values for each correct label

# go through all the images in the test data set one by one
for r in range(len(testing_data)):

    # the first element of each test record is the correct label
    correct_label = int(testing_data[r][0])
    # scale and shift the input image values
    inputs = ((testing_data[r][1:]) / 255.0 * 0.99) + 0.01
    # pre-processing the inputs with convolution
    inputs = n.conv_func(inputs)
    # query the network
    outputs = n.query(inputs)
    predicted = numpy.append(predicted, outputs)

    # x indexes the columns of the output, i.e. 40 in total
    for x in range(len(outputs)): # running the loop once per output node
        Output_Matrix[correct_label, x] += float(outputs[x]) # add each output value to the row for the correct label
        pass

    # take the index of the highest value in the output as the predicted label
    label = numpy.argmax(outputs)

    # append correct or incorrect to the list
    if (label == correct_label):
        # network's answer matches correct answer, add 1 to scorecard
        scorecard.append(1)
    else:
        # network's answer doesn't match correct answer, add 0 to scorecard
        scorecard.append(0)
        pass
    pass

# calculate the performance score, the percentage of correct answers
scorecard_array = numpy.asarray(scorecard)
print ("performance = ", scorecard_array.sum() * 100 / scorecard_array.size)

y_true = testing_data[:,0]

# the loss needs the whole 40-value output row for each image
y_pred = numpy.reshape(predicted, (4880, 40))

loss = scce(y_true, y_pred).numpy()
print(loss)

Confusion_Matrix = numpy.divide(Output_Matrix, 488) # scale down the summed outputs (there are 4,880 test images in total)
df_cm = pd.DataFrame(Confusion_Matrix, index = [i for i in range(0,40)], columns = [i for i in range(0,40)])
plt.figure(figsize = (50,30))
sn.heatmap(df_cm, annot=True, cmap="Blues")

performance = 53.790983606557376

1.7979512565347056

Out[144]: <AxesSubplot:>
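
Note that Output_Matrix accumulates raw output activations rather than prediction counts, so the heatmap shows the network's average confidence per true class rather than a classical confusion matrix. A count-based alternative could be sketched from the y_true and y_pred arrays already computed above:

In [ ]:  # sketch: count argmax predictions per true class
pred_labels = numpy.argmax(y_pred, axis=1)
count_cm = numpy.zeros((40, 40), dtype=int)
for t, p in zip(y_true.astype(int), pred_labels):
    count_cm[t, p] += 1
# each row then sums to the number of test images of that class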


In [ ]:
