ANN PRACTICAL ALL

ANN PRACTICAL A1

import numpy as np
import matplotlib.pyplot as plt

# Define the activation functions
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def relu(x):
    return np.maximum(0, x)

def tanh(x):
    return np.tanh(x)

# Define the x-axis range
x = np.linspace(-5, 5, 100)

# Plot the activation functions
plt.plot(x, sigmoid(x), label='Sigmoid')
plt.plot(x, relu(x), label='ReLU')
plt.plot(x, tanh(x), label='Tanh')

# Set the plot title and labels
plt.title('Activation Functions')
plt.xlabel('Input')
plt.ylabel('Output')

# Show the legend and display the plot
plt.legend()
plt.show()
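
As a quick extension (not part of the original practical), the same x range can be used to plot each activation's derivative, which is what backpropagation actually uses; a minimal sketch, assuming the definitions above are still in scope:

# Sketch (addition): derivatives of the three activations
def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1 - s)

def relu_derivative(x):
    return (x > 0).astype(float)

def tanh_derivative(x):
    return 1 - np.tanh(x) ** 2

plt.plot(x, sigmoid_derivative(x), label="Sigmoid'")
plt.plot(x, relu_derivative(x), label="ReLU'")
plt.plot(x, tanh_derivative(x), label="Tanh'")
plt.title('Activation Function Derivatives')
plt.legend()
plt.show()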
ANN PRACTICAL A4
import numpy as np
import matplotlib.pyplot as plt

# Define the perceptron function
def perceptron(x, w, b):
    return np.sign(np.dot(x, w) + b)

# Define the perceptron learning algorithm
def perceptron_learning(X, Y, learning_rate, n_iterations):
    n_samples, n_features = X.shape
    w = np.zeros(n_features)
    b = 0
    for _ in range(n_iterations):
        for i in range(n_samples):
            x = X[i]
            y = Y[i]
            y_pred = perceptron(x, w, b)
            if y_pred != y:
                w += learning_rate * y * x
                b += learning_rate * y
    return w, b

# Generate some random data for binary classification
# (labels must be -1/+1 for the sign-based update rule; with 0/1 labels
# the update "learning_rate * y * x" would be zero for the negative class)
np.random.seed(0)
X = np.random.randn(200, 2)
Y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)

# Train the perceptron on the data
w, b = perceptron_learning(X, Y, 0.1, 100)

# Plot the decision boundary
fig, ax = plt.subplots()
ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.RdYlBu)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
x = np.linspace(xlim[0], xlim[1], 100)
y = np.linspace(ylim[0], ylim[1], 100)
xx, yy = np.meshgrid(x, y)
xy = np.vstack([xx.ravel(), yy.ravel()]).T
Z = perceptron(xy, w, b).reshape(xx.shape)
ax.contour(xx, yy, Z, colors='k', levels=[-1, 0, 1], alpha=0.5)
ax.set_title('Perceptron Decision Regions')
plt.show()
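
A quick sanity check, added here as a sketch (not in the original listing): compute the training accuracy of the learned weights.

# Sketch (addition): training accuracy of the fitted perceptron,
# assuming X, Y, w, b from the listing above are in scope
predictions = perceptron(X, w, b)
print('Training accuracy:', np.mean(predictions == Y))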
ANN PRACTICAL A5
import numpy as np

# Define the BAM function
def BAM(X, Y):
    n_samples, n_features = X.shape
    _, n_labels = Y.shape
    W = np.zeros((n_features, n_labels))
    for i in range(n_samples):
        x = X[i].reshape((n_features, 1))
        y = Y[i].reshape((n_labels, 1))
        W += np.dot(x, y.T)
    return W

# Define the bidirectional recall function
def bidirectional_recall(X, W):
    n_samples, n_features = X.shape
    Y = np.zeros((n_samples, W.shape[1]))
    for i in range(n_samples):
        x = X[i].reshape((n_features, 1))
        y = np.dot(W.T, x)
        y[y >= 0] = 1
        y[y < 0] = -1
        Y[i] = y.T
    return Y

# Define the input and output patterns
X = np.array([[1, 1, -1, -1], [1, -1, 1, -1]])
Y = np.array([[1, -1], [-1, 1]])

# Train the BAM on the patterns
W = BAM(X, Y)

# Recall the output patterns from the input patterns
Y_recalled = bidirectional_recall(X, W)

# Print the output patterns
print('Input Patterns:\n', X)
print('Output Patterns:\n', Y_recalled)

OUTPUT:
Input Patterns:
[[ 1 1 -1 -1]
[ 1 -1 1 -1]]
Output Patterns:
[[ 1. -1.]
[-1. 1.]]
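
Because a BAM is bidirectional, the same weight matrix also recalls the input patterns back from the outputs. A minimal sketch of the reverse direction, assuming X, Y, and W from the listing above:

# Sketch (addition): recall X from Y. Forward recall above used W.T,
# so the reverse direction multiplies by W.
X_recalled = np.where(np.dot(Y, W.T) >= 0, 1, -1)
print('Recalled Input Patterns:\n', X_recalled)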

ANN PRACTICAL A7
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)

class ANN:

    def __init__(self, input_shape, output_shape, hidden_shape):
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.hidden_shape = hidden_shape

        self.weights_ih = np.random.randn(self.input_shape, self.hidden_shape)
        self.weights_ho = np.random.randn(self.hidden_shape, self.output_shape)
        self.biases_h = np.zeros((1, self.hidden_shape))
        self.biases_o = np.zeros((1, self.output_shape))

    def forward_propagation(self, inputs):
        self.hidden_layer = sigmoid(np.dot(inputs, self.weights_ih) + self.biases_h)
        self.output_layer = sigmoid(np.dot(self.hidden_layer, self.weights_ho) + self.biases_o)

    def backpropagation(self, inputs, targets, learning_rate):
        error = targets - self.output_layer
        d_output = error * sigmoid_derivative(self.output_layer)

        error_hidden = d_output.dot(self.weights_ho.T)
        d_hidden = error_hidden * sigmoid_derivative(self.hidden_layer)

        self.weights_ho += self.hidden_layer.T.dot(d_output) * learning_rate
        self.weights_ih += inputs.T.dot(d_hidden) * learning_rate

        self.biases_o += np.sum(d_output, axis=0) * learning_rate
        self.biases_h += np.sum(d_hidden, axis=0) * learning_rate

# define input and output data
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
y = np.array([[0], [1], [1], [0]])

# create ANN instance
ann = ANN(input_shape=3, output_shape=1, hidden_shape=4)

# train ANN
for i in range(10000):
    ann.forward_propagation(X)
    ann.backpropagation(X, y, 0.1)

# make predictions
ann.forward_propagation(X)
print(ann.output_layer)

OUTPUT:
[[0.0383657 ]
[0.96089964]
[0.9631848 ]
[0.03347953]]
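
As a small addition (not in the original), the training loop can print the mean squared error periodically to confirm the network is converging; a sketch reusing the ANN class and data above:

# Sketch (addition): retrain a fresh instance while monitoring MSE
ann = ANN(input_shape=3, output_shape=1, hidden_shape=4)
for i in range(10000):
    ann.forward_propagation(X)
    ann.backpropagation(X, y, 0.1)
    if i % 2000 == 0:
        print(f'iteration {i}: mse = {np.mean((y - ann.output_layer) ** 2):.4f}')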
ANN PRACTICAL B1
import numpy as np

# Define the sigmoid function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Define the derivative of the sigmoid function
def sigmoid_derivative(x):
    return x * (1 - x)

# Define the inputs and expected outputs for the XOR function
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])

# Initialize the weights and biases
input_layer_size = 2
hidden_layer_size = 2
output_layer_size = 1

weights1 = np.random.randn(input_layer_size, hidden_layer_size)
weights2 = np.random.randn(hidden_layer_size, output_layer_size)
bias1 = np.random.randn(hidden_layer_size)
bias2 = np.random.randn(output_layer_size)

# Set the learning rate and number of iterations
learning_rate = 0.1
num_iterations = 100000

# Perform the training using backpropagation
for i in range(num_iterations):
    # Forward propagation
    hidden_layer = sigmoid(np.dot(X, weights1) + bias1)
    output_layer = sigmoid(np.dot(hidden_layer, weights2) + bias2)

    # Backward propagation
    output_layer_error = y - output_layer
    output_layer_delta = output_layer_error * sigmoid_derivative(output_layer)

    hidden_layer_error = output_layer_delta.dot(weights2.T)
    hidden_layer_delta = hidden_layer_error * sigmoid_derivative(hidden_layer)

    # Update the weights and biases
    # (biases are summed over the sample axis so each unit keeps its own bias)
    weights2 += hidden_layer.T.dot(output_layer_delta) * learning_rate
    bias2 += np.sum(output_layer_delta, axis=0) * learning_rate

    weights1 += X.T.dot(hidden_layer_delta) * learning_rate
    bias1 += np.sum(hidden_layer_delta, axis=0) * learning_rate

# Test the trained model
test_input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
predicted_output = sigmoid(np.dot(sigmoid(np.dot(test_input, weights1) + bias1),
                                  weights2) + bias2)

print("Predicted output:")
print(np.round(predicted_output))

OUTPUT:
Predicted output:
[[0.]
[1.]
[1.]
[0.]]
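
A useful follow-up (an addition, not part of the original practical) is to visualize the decision surface the network has learned over the unit square; a sketch assuming sigmoid, weights1, weights2, bias1, bias2, X, and y from the listing above:

# Sketch (addition): plot the learned XOR decision surface
import matplotlib.pyplot as plt

xx, yy = np.meshgrid(np.linspace(-0.5, 1.5, 200), np.linspace(-0.5, 1.5, 200))
grid = np.c_[xx.ravel(), yy.ravel()]
zz = sigmoid(np.dot(sigmoid(np.dot(grid, weights1) + bias1), weights2) + bias2)
plt.contourf(xx, yy, zz.reshape(xx.shape), levels=20, cmap='RdBu')
plt.colorbar(label='network output')
plt.scatter(X[:, 0], X[:, 1], c=y.ravel(), edgecolors='k')
plt.title('XOR Decision Surface')
plt.show()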

ANN PRACTICAL B2
import numpy as np

class ARTNetwork:
    def __init__(self, num_input, vigilance):
        self.num_input = num_input
        self.vigilance = vigilance
        self.weights = np.zeros((num_input,))

    def train(self, input_pattern):
        while True:
            net_input = np.dot(input_pattern, self.weights)
            if np.sum(input_pattern) <= np.sum(net_input):
                return
            else:
                self.weights += (input_pattern - self.vigilance * net_input) / (1 + self.vigilance)

    def predict(self, input_pattern):
        net_input = np.dot(input_pattern, self.weights)
        output_pattern = np.zeros((self.num_input,))
        output_pattern[net_input.argmax()] = 1
        return output_pattern

# create an ART network with 4 input neurons and vigilance = 0.5
art = ARTNetwork(num_input=4, vigilance=0.5)

# train the network on two input patterns
art.train(np.array([1, 0, 0, 0]))
art.train(np.array([0, 0, 1, 0]))

# predict the output for a new input pattern
output = art.predict(np.array([0, 1, 0, 0]))
print(output)

OUTPUT:
[1. 0. 0. 0.]

ANN PRACTICAL B4
import numpy as np

class HopfieldNetwork:

    def __init__(self, num_neurons):
        self.num_neurons = num_neurons
        self.weights = np.zeros((num_neurons, num_neurons))

    def train(self, patterns):
        for pattern in patterns:
            self.weights += np.outer(pattern, pattern)
        np.fill_diagonal(self.weights, 0)

    def recall(self, pattern):
        converged = False
        while not converged:
            activation = np.dot(self.weights, pattern)
            output = np.where(activation > 0, 1, -1)
            if np.array_equal(output, pattern):
                converged = True
            else:
                pattern = output
        return output

# define the patterns to be stored
patterns = [
    [1, 1, -1, -1],
    [-1, -1, 1, 1],
    [1, -1, 1, -1],
    [-1, 1, -1, 1]
]

# create a Hopfield network and train it on the patterns
network = HopfieldNetwork(4)
network.train(patterns)

# test the network by recalling the patterns
for pattern in patterns:
    print("Input Pattern:", pattern)
    output = network.recall(pattern)
    print("Recalled Pattern:", output)
    print()

OUTPUT:
Input Pattern: [1, 1, -1, -1]
Recalled Pattern: [ 1 1 -1 -1]
Input Pattern: [-1, -1, 1, 1]
Recalled Pattern: [-1 -1 1 1]
Input Pattern: [1, -1, 1, -1]
Recalled Pattern: [ 1 -1 1 -1]
Input Pattern: [-1, 1, -1, 1]
Recalled Pattern: [-1 1 -1 1]
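
A common check, added here as a sketch (not in the original): stored patterns should sit at low values of the Hopfield energy E = -1/2 p^T W p, assuming network and patterns from the listing above:

# Sketch (addition): energy of each stored pattern
for pattern in patterns:
    p = np.array(pattern)
    print('Pattern:', pattern, 'Energy:', -0.5 * p @ network.weights @ p)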

ANN PRACTICAL B5
import torch
import torchvision
import cv2
import numpy as np

# Define the model
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()

# Load an image
img = cv2.imread('test.jpg')

# Preprocess the image (keep the original BGR image for drawing later;
# the detection model expects a float tensor scaled to [0, 1])
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
tensor = torch.from_numpy(np.transpose(img_rgb, (2, 0, 1))).float() / 255.0
tensor = tensor.unsqueeze(0)

# Pass the image through the model
with torch.no_grad():
    output = model(tensor)[0]

# Extract the bounding boxes, scores, and class labels
boxes = output['boxes'].numpy()
scores = output['scores'].numpy()
labels = output['labels'].numpy()

# Draw the bounding boxes on the original image
# (OpenCV drawing functions need a numpy image, not a tensor)
for i in range(len(boxes)):
    if scores[i] > 0.5:
        box = boxes[i]
        cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
                      (0, 255, 0), 2)
        cv2.putText(img, str(labels[i]), (int(box[0]), int(box[1]) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

# Display the image
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

OUTPUT: (an OpenCV window displaying test.jpg with green bounding boxes and numeric class labels; the image itself is not reproduced here)
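
The labels drawn above are raw numeric COCO class indices. As an addition, with a recent torchvision (0.13 or later, an assumption here), the human-readable category names can be looked up from the weights metadata:

# Sketch (addition): map numeric labels to COCO category names,
# assuming torchvision >= 0.13 and boxes/scores/labels from above
from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights

categories = FasterRCNN_ResNet50_FPN_Weights.DEFAULT.meta["categories"]
for i in range(len(boxes)):
    if scores[i] > 0.5:
        print(categories[labels[i]], scores[i])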
ANN PRACTICAL C1

import tensorflow as tf
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Load the breast cancer dataset
data = load_breast_cancer()

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target,
                                                    test_size=0.2)

# Scale the data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Define the neural network architecture
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(X_train.shape[1],)),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_test, y_test))

# Evaluate the model on the test set
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Test set loss: {loss:.4f}, accuracy: {accuracy:.4f}')

# Train a logistic regression model using TensorFlow
logistic_model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(X_train.shape[1],))
])
logistic_model.compile(optimizer='adam', loss='binary_crossentropy',
                       metrics=['accuracy'])
logistic_model.fit(X_train, y_train, epochs=50, batch_size=32,
                   validation_data=(X_test, y_test))
loss, accuracy = logistic_model.evaluate(X_test, y_test)
print(f'Test set loss: {loss:.4f}, accuracy: {accuracy:.4f}')

OUTPUT:
Epoch 1/50
15/15 [==============================] - 1s 27ms/step - loss: 0.7860 - accuracy: 0.4198 - val_loss: 0.7002 -
val_accuracy: 0.4298
Epoch 2/50
15/15 [==============================] - 0s 5ms/step - loss: 0.5435 - accuracy: 0.7011 - val_loss: 0.4789 -
val_accuracy: 0.8596
Epoch 3/50
15/15 [==============================] - 0s 4ms/step - loss: 0.3941 - accuracy: 0.8593 - val_loss: 0.3441 -
val_accuracy: 0.9123
Epoch 4/50
15/15 [==============================] - 0s 4ms/step - loss: 0.3036 - accuracy: 0.9143 - val_loss: 0.2644 -
val_accuracy: 0.9298
Epoch 5/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2487 - accuracy: 0.9363 - val_loss: 0.2133 -
val_accuracy: 0.9474
Epoch 6/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2128 - accuracy: 0.9407 - val_loss: 0.1795 -
val_accuracy: 0.9561
Epoch 7/50
15/15 [==============================] - 0s 8ms/step - loss: 0.1871 - accuracy: 0.9407 - val_loss: 0.1582 -
val_accuracy: 0.9561
Epoch 8/50
15/15 [==============================] - 0s 8ms/step - loss: 0.1698 - accuracy: 0.9429 - val_loss: 0.1415 -
val_accuracy: 0.9561
Epoch 9/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1559 - accuracy: 0.9495 - val_loss: 0.1281 -
val_accuracy: 0.9561
Epoch 10/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1446 - accuracy: 0.9538 - val_loss: 0.1176 -
val_accuracy: 0.9561
Epoch 11/50
15/15 [==============================] - 0s 3ms/step - loss: 0.1353 - accuracy: 0.9560 - val_loss: 0.1094 -
val_accuracy: 0.9561
Epoch 12/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1277 - accuracy: 0.9560 - val_loss: 0.1027 -
val_accuracy: 0.9737
Epoch 13/50
15/15 [==============================] - 0s 3ms/step - loss: 0.1210 - accuracy: 0.9604 - val_loss: 0.0966 -
val_accuracy: 0.9737
Epoch 14/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1152 - accuracy: 0.9604 - val_loss: 0.0911 -
val_accuracy: 0.9825
Epoch 15/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1103 - accuracy: 0.9648 - val_loss: 0.0866 -
val_accuracy: 0.9825
Epoch 16/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1054 - accuracy: 0.9670 - val_loss: 0.0817 -
val_accuracy: 0.9825
Epoch 17/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1016 - accuracy: 0.9670 - val_loss: 0.0772 -
val_accuracy: 0.9825
Epoch 18/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0978 - accuracy: 0.9670 - val_loss: 0.0742 -
val_accuracy: 0.9825
Epoch 19/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0947 - accuracy: 0.9670 - val_loss: 0.0717 -
val_accuracy: 0.9825
Epoch 20/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0916 - accuracy: 0.9714 - val_loss: 0.0690 -
val_accuracy: 0.9825
Epoch 21/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0888 - accuracy: 0.9714 - val_loss: 0.0667 -
val_accuracy: 0.9825
Epoch 22/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0863 - accuracy: 0.9714 - val_loss: 0.0647 -
val_accuracy: 0.9825
Epoch 23/50
15/15 [==============================] - 0s 5ms/step - loss: 0.0837 - accuracy: 0.9736 - val_loss: 0.0625 -
val_accuracy: 0.9825
Epoch 24/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0817 - accuracy: 0.9758 - val_loss: 0.0612 -
val_accuracy: 0.9825
Epoch 25/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0797 - accuracy: 0.9758 - val_loss: 0.0586 -
val_accuracy: 0.9912
Epoch 26/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0776 - accuracy: 0.9780 - val_loss: 0.0578 -
val_accuracy: 0.9912
Epoch 27/50
15/15 [==============================] - 0s 3ms/step - loss: 0.0759 - accuracy: 0.9780 - val_loss: 0.0563 -
val_accuracy: 0.9912
Epoch 28/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0743 - accuracy: 0.9780 - val_loss: 0.0563 -
val_accuracy: 1.0000
Epoch 29/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0728 - accuracy: 0.9780 - val_loss: 0.0549 -
val_accuracy: 1.0000
Epoch 30/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0714 - accuracy: 0.9780 - val_loss: 0.0526 -
val_accuracy: 1.0000
Epoch 31/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0701 - accuracy: 0.9780 - val_loss: 0.0509 -
val_accuracy: 1.0000
Epoch 32/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0685 - accuracy: 0.9780 - val_loss: 0.0516 -
val_accuracy: 1.0000
Epoch 33/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0674 - accuracy: 0.9802 - val_loss: 0.0492 -
val_accuracy: 1.0000
Epoch 34/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0661 - accuracy: 0.9802 - val_loss: 0.0487 -
val_accuracy: 1.0000
Epoch 35/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0648 - accuracy: 0.9802 - val_loss: 0.0476 -
val_accuracy: 1.0000
Epoch 36/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0638 - accuracy: 0.9846 - val_loss: 0.0468 -
val_accuracy: 1.0000
Epoch 37/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0626 - accuracy: 0.9846 - val_loss: 0.0450 -
val_accuracy: 1.0000
Epoch 38/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0616 - accuracy: 0.9846 - val_loss: 0.0452 -
val_accuracy: 1.0000
Epoch 39/50
15/15 [==============================] - 0s 7ms/step - loss: 0.0605 - accuracy: 0.9846 - val_loss: 0.0444 -
val_accuracy: 1.0000
Epoch 40/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0595 - accuracy: 0.9846 - val_loss: 0.0441 -
val_accuracy: 1.0000
Epoch 41/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0587 - accuracy: 0.9846 - val_loss: 0.0434 -
val_accuracy: 1.0000
Epoch 42/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0578 - accuracy: 0.9846 - val_loss: 0.0449 -
val_accuracy: 1.0000
Epoch 43/50
15/15 [==============================] - 0s 7ms/step - loss: 0.0566 - accuracy: 0.9846 - val_loss: 0.0467 -
val_accuracy: 0.9825
Epoch 44/50
15/15 [==============================] - 0s 6ms/step - loss: 0.0559 - accuracy: 0.9846 - val_loss: 0.0471 -
val_accuracy: 0.9737
Epoch 45/50
15/15 [==============================] - 0s 5ms/step - loss: 0.0550 - accuracy: 0.9868 - val_loss: 0.0471 -
val_accuracy: 0.9825
Epoch 46/50
15/15 [==============================] - 0s 5ms/step - loss: 0.0542 - accuracy: 0.9868 - val_loss: 0.0457 -
val_accuracy: 0.9912
Epoch 47/50
15/15 [==============================] - 0s 3ms/step - loss: 0.0533 - accuracy: 0.9846 - val_loss: 0.0443 -
val_accuracy: 0.9912
Epoch 48/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0524 - accuracy: 0.9846 - val_loss: 0.0433 -
val_accuracy: 0.9912
Epoch 49/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0516 - accuracy: 0.9868 - val_loss: 0.0420 -
val_accuracy: 0.9912
Epoch 50/50
15/15 [==============================] - 0s 4ms/step - loss: 0.0509 - accuracy: 0.9868 - val_loss: 0.0422 -
val_accuracy: 1.0000
4/4 [==============================] - 0s 2ms/step - loss: 0.0422 - accuracy: 1.0000
Test set loss: 0.0422, accuracy: 1.0000
Epoch 1/50
15/15 [==============================] - 1s 13ms/step - loss: 1.7898 - accuracy: 0.1758 - val_loss: 1.5630 -
val_accuracy: 0.1754
Epoch 2/50
15/15 [==============================] - 0s 4ms/step - loss: 1.6253 - accuracy: 0.2022 - val_loss: 1.4085 -
val_accuracy: 0.2018
Epoch 3/50
15/15 [==============================] - 0s 4ms/step - loss: 1.4732 - accuracy: 0.2352 - val_loss: 1.2629 -
val_accuracy: 0.2895
Epoch 4/50
15/15 [==============================] - 0s 4ms/step - loss: 1.3293 - accuracy: 0.2725 - val_loss: 1.1318 -
val_accuracy: 0.3596
Epoch 5/50
15/15 [==============================] - 0s 4ms/step - loss: 1.1994 - accuracy: 0.3231 - val_loss: 1.0122 -
val_accuracy: 0.4386
Epoch 6/50
15/15 [==============================] - 0s 4ms/step - loss: 1.0812 - accuracy: 0.3736 - val_loss: 0.9042 -
val_accuracy: 0.5000
Epoch 7/50
15/15 [==============================] - 0s 4ms/step - loss: 0.9743 - accuracy: 0.4374 - val_loss: 0.8107 -
val_accuracy: 0.5614
Epoch 8/50
15/15 [==============================] - 0s 4ms/step - loss: 0.8813 - accuracy: 0.5033 - val_loss: 0.7301 -
val_accuracy: 0.6228
Epoch 9/50
15/15 [==============================] - 0s 3ms/step - loss: 0.7998 - accuracy: 0.5626 - val_loss: 0.6616 -
val_accuracy: 0.6667
Epoch 10/50
15/15 [==============================] - 0s 4ms/step - loss: 0.7293 - accuracy: 0.6198 - val_loss: 0.6016 -
val_accuracy: 0.7105
Epoch 11/50
15/15 [==============================] - 0s 3ms/step - loss: 0.6692 - accuracy: 0.6725 - val_loss: 0.5475 -
val_accuracy: 0.7807
Epoch 12/50
15/15 [==============================] - 0s 4ms/step - loss: 0.6141 - accuracy: 0.6945 - val_loss: 0.5046 -
val_accuracy: 0.8333
Epoch 13/50
15/15 [==============================] - 0s 3ms/step - loss: 0.5683 - accuracy: 0.7297 - val_loss: 0.4670 -
val_accuracy: 0.8596
Epoch 14/50
15/15 [==============================] - 0s 4ms/step - loss: 0.5289 - accuracy: 0.7626 - val_loss: 0.4348 -
val_accuracy: 0.8684
Epoch 15/50
15/15 [==============================] - 0s 4ms/step - loss: 0.4935 - accuracy: 0.7802 - val_loss: 0.4071 -
val_accuracy: 0.8684
Epoch 16/50
15/15 [==============================] - 0s 4ms/step - loss: 0.4629 - accuracy: 0.8000 - val_loss: 0.3826 -
val_accuracy: 0.8860
Epoch 17/50
15/15 [==============================] - 0s 6ms/step - loss: 0.4367 - accuracy: 0.8198 - val_loss: 0.3615 -
val_accuracy: 0.8947
Epoch 18/50
15/15 [==============================] - 0s 7ms/step - loss: 0.4132 - accuracy: 0.8484 - val_loss: 0.3430 -
val_accuracy: 0.9035
Epoch 19/50
15/15 [==============================] - 0s 6ms/step - loss: 0.3926 - accuracy: 0.8527 - val_loss: 0.3260 -
val_accuracy: 0.9035
Epoch 20/50
15/15 [==============================] - 0s 5ms/step - loss: 0.3743 - accuracy: 0.8593 - val_loss: 0.3105 -
val_accuracy: 0.9211
Epoch 21/50
15/15 [==============================] - 0s 4ms/step - loss: 0.3573 - accuracy: 0.8725 - val_loss: 0.2970 -
val_accuracy: 0.9211
Epoch 22/50
15/15 [==============================] - 0s 5ms/step - loss: 0.3424 - accuracy: 0.8747 - val_loss: 0.2844 -
val_accuracy: 0.9386
Epoch 23/50
15/15 [==============================] - 0s 5ms/step - loss: 0.3288 - accuracy: 0.8791 - val_loss: 0.2734 -
val_accuracy: 0.9386
Epoch 24/50
15/15 [==============================] - 0s 5ms/step - loss: 0.3167 - accuracy: 0.8835 - val_loss: 0.2629 -
val_accuracy: 0.9386
Epoch 25/50
15/15 [==============================] - 0s 7ms/step - loss: 0.3049 - accuracy: 0.8879 - val_loss: 0.2537 -
val_accuracy: 0.9386
Epoch 26/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2950 - accuracy: 0.8901 - val_loss: 0.2448 -
val_accuracy: 0.9386
Epoch 27/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2847 - accuracy: 0.9011 - val_loss: 0.2364 -
val_accuracy: 0.9386
Epoch 28/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2751 - accuracy: 0.9077 - val_loss: 0.2285 -
val_accuracy: 0.9386
Epoch 29/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2667 - accuracy: 0.9143 - val_loss: 0.2209 -
val_accuracy: 0.9474
Epoch 30/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2585 - accuracy: 0.9187 - val_loss: 0.2146 -
val_accuracy: 0.9474
Epoch 31/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2511 - accuracy: 0.9209 - val_loss: 0.2085 -
val_accuracy: 0.9474
Epoch 32/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2446 - accuracy: 0.9231 - val_loss: 0.2025 -
val_accuracy: 0.9561
Epoch 33/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2382 - accuracy: 0.9297 - val_loss: 0.1973 -
val_accuracy: 0.9561
Epoch 34/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2325 - accuracy: 0.9385 - val_loss: 0.1920 -
val_accuracy: 0.9561
Epoch 35/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2269 - accuracy: 0.9385 - val_loss: 0.1871 -
val_accuracy: 0.9649
Epoch 36/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2217 - accuracy: 0.9407 - val_loss: 0.1825 -
val_accuracy: 0.9649
Epoch 37/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2170 - accuracy: 0.9407 - val_loss: 0.1780 -
val_accuracy: 0.9649
Epoch 38/50
15/15 [==============================] - 0s 4ms/step - loss: 0.2125 - accuracy: 0.9429 - val_loss: 0.1739 -
val_accuracy: 0.9649
Epoch 39/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2082 - accuracy: 0.9495 - val_loss: 0.1703 -
val_accuracy: 0.9737
Epoch 40/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2041 - accuracy: 0.9516 - val_loss: 0.1665 -
val_accuracy: 0.9825
Epoch 41/50
15/15 [==============================] - 0s 5ms/step - loss: 0.2003 - accuracy: 0.9538 - val_loss: 0.1629 -
val_accuracy: 0.9825
Epoch 42/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1966 - accuracy: 0.9538 - val_loss: 0.1597 -
val_accuracy: 0.9825
Epoch 43/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1933 - accuracy: 0.9538 - val_loss: 0.1565 -
val_accuracy: 0.9825
Epoch 44/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1899 - accuracy: 0.9560 - val_loss: 0.1536 -
val_accuracy: 0.9825
Epoch 45/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1869 - accuracy: 0.9560 - val_loss: 0.1506 -
val_accuracy: 0.9825
Epoch 46/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1838 - accuracy: 0.9604 - val_loss: 0.1478 -
val_accuracy: 0.9825
Epoch 47/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1810 - accuracy: 0.9604 - val_loss: 0.1451 -
val_accuracy: 0.9825
Epoch 48/50
15/15 [==============================] - 0s 5ms/step - loss: 0.1782 - accuracy: 0.9604 - val_loss: 0.1425 -
val_accuracy: 0.9825
Epoch 49/50
15/15 [==============================] - 0s 4ms/step - loss: 0.1756 - accuracy: 0.9626 - val_loss: 0.1400 -
val_accuracy: 0.9825
Epoch 50/50
15/15 [==============================] - 0s 3ms/step - loss: 0.1731 - accuracy: 0.9626 - val_loss: 0.1378 -
val_accuracy: 0.9825
4/4 [==============================] - 0s 2ms/step - loss: 0.1378 - accuracy: 0.9825
Test set loss: 0.1378, accuracy: 0.9825
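
As a follow-up sketch (not in the original practical), the trained network can classify individual samples; new samples must go through the same scaler before prediction:

# Sketch (addition): predict classes for a few test samples,
# assuming model and the already-scaled X_test, y_test from above
probs = model.predict(X_test[:5])
preds = (probs > 0.5).astype(int).ravel()
print('Predicted classes:', preds)
print('True classes:     ', y_test[:5])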

ANN PRACTICAL C2
import tensorflow as tf

# Define the model architecture
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

# Compile the model with an optimizer, loss function, and evaluation metric
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])

# Load the MNIST dataset and preprocess the images
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_train = x_train / 255.0
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_test = x_test / 255.0
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

# Train the model
model.fit(x_train, y_train, epochs=5)

# Evaluate the model on the test set
test_loss, test_accuracy = model.evaluate(x_test, y_test)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)

OUTPUT:
Downloading data from
https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 [==============================] - 6s 1us/step
Epoch 1/5
1875/1875 [==============================] - 21s 11ms/step - loss: 0.1547 -
accuracy: 0.9534
Epoch 2/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.0524 -
accuracy: 0.9841
Epoch 3/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.0330 -
accuracy: 0.9894
Epoch 4/5
1875/1875 [==============================] - 20s 11ms/step - loss: 0.0216 -
accuracy: 0.9934
Epoch 5/5
1875/1875 [==============================] - 21s 11ms/step - loss: 0.0146 -
accuracy: 0.9956
313/313 [==============================] - 1s 3ms/step - loss: 0.0493 - accuracy:
0.9859
Test Loss: 0.04932432249188423
Test Accuracy: 0.9858999848365784
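
A minimal sketch (an addition) for classifying a single test digit with the trained CNN, assuming model, x_test, and the one-hot y_test from above:

# Sketch (addition): predict the class of one test image
sample = x_test[:1]  # shape (1, 28, 28, 1)
pred = model.predict(sample).argmax(axis=1)[0]
true = y_test[0].argmax()
print('Predicted digit:', pred, '| True digit:', true)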

ANN PRACTICAL C4

In [ ]: import tensorflow
        from tensorflow import keras
        from tensorflow.keras import Sequential
        from tensorflow.keras.layers import Dense, Flatten
In [ ]: (X_train,y_train),(X_test,y_test) = keras.datasets.mnist.load_data()
In [ ]: X_test.shape
Out[ ]: (10000, 28, 28)
In [ ]: y_train
Out[ ]: array([5, 0, 4, ..., 5, 6, 8], dtype=uint8)
In [ ]: import matplotlib.pyplot as plt
        plt.imshow(X_train[2])
Out[ ]: <matplotlib.image.AxesImage at 0x7f573ed9f790>

In [ ]: X_train = X_train/255
        X_test = X_test/255
        X_train[0]

Out[ ]: array([[0., 0., 0., ..., 0., 0., 0.],
               [0., 0., 0., ..., 0., 0., 0.],
               ...,
               [0., 0., 0., ..., 0., 0., 0.]])
        (28x28 array of normalized pixel values; full dump truncated)
In [ ]: model = Sequential()
        model.add(Flatten(input_shape=(28,28)))
        model.add(Dense(128,activation='relu'))
        model.add(Dense(32,activation='relu'))
        model.add(Dense(10,activation='softmax'))
In [ ]: model.summary()
Model: "sequential_5"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 flatten_4 (Flatten)         (None, 784)               0
 dense_11 (Dense)            (None, 128)               100480
 dense_12 (Dense)            (None, 32)                4128
 dense_13 (Dense)            (None, 10)                330
=================================================================
Total params: 104,938
Trainable params: 104,938
Non-trainable params: 0
_________________________________________________________________
In [ ]: model.compile(loss='sparse_categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
In [ ]: history = model.fit(X_train,y_train,epochs=25,validation_split=0.2)
Epoch 1/25
1500/1500 [==============================] - 5s 3ms/step - loss: 0.2845 - accuracy:
0.9168 - val_loss: 0.1425 - val_accuracy: 0.9570
Epoch 2/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.1164 - accuracy:
0.9638 - val_loss: 0.1258 - val_accuracy: 0.9605
Epoch 3/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0800 - accuracy:
0.9754 - val_loss: 0.0912 - val_accuracy: 0.9728
Epoch 4/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0594 - accuracy:
0.9817 - val_loss: 0.1014 - val_accuracy: 0.9697
Epoch 5/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0462 - accuracy:
0.9851 - val_loss: 0.1024 - val_accuracy: 0.9687
Epoch 6/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0377 - accuracy:
0.9881 - val_loss: 0.1060 - val_accuracy: 0.9712
Epoch 7/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0313 - accuracy:
0.9898 - val_loss: 0.0960 - val_accuracy: 0.9758
Epoch 8/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0283 - accuracy:
0.9902 - val_loss: 0.1061 - val_accuracy: 0.9732
Epoch 9/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0230 - accuracy:
0.9923 - val_loss: 0.1160 - val_accuracy: 0.9713
Epoch 10/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0179 - accuracy:
0.9939 - val_loss: 0.1134 - val_accuracy: 0.9761
Epoch 11/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0177 - accuracy:
0.9937 - val_loss: 0.1204 - val_accuracy: 0.9742
Epoch 12/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0156 - accuracy:
0.9947 - val_loss: 0.1103 - val_accuracy: 0.9761
Epoch 13/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0156 - accuracy:
0.9949 - val_loss: 0.1221 - val_accuracy: 0.9743
Epoch 14/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0121 - accuracy:
0.9960 - val_loss: 0.1246 - val_accuracy: 0.9764
Epoch 15/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0122 - accuracy:
0.9958 - val_loss: 0.1362 - val_accuracy: 0.9753
Epoch 16/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0100 - accuracy:
0.9964 - val_loss: 0.1340 - val_accuracy: 0.9772
Epoch 17/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0131 - accuracy:
0.9955 - val_loss: 0.1371 - val_accuracy: 0.9752
Epoch 18/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0101 - accuracy:
0.9968 - val_loss: 0.1318 - val_accuracy: 0.9760
Epoch 19/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0116 - accuracy:
0.9962 - val_loss: 0.1577 - val_accuracy: 0.9732
Epoch 20/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0093 - accuracy:
0.9969 - val_loss: 0.1570 - val_accuracy: 0.9718
Epoch 21/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0095 - accuracy:
0.9970 - val_loss: 0.1347 - val_accuracy: 0.9772
Epoch 22/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0076 - accuracy:
0.9975 - val_loss: 0.1591 - val_accuracy: 0.9762
Epoch 23/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0079 - accuracy:
0.9975 - val_loss: 0.1919 - val_accuracy: 0.9682
Epoch 24/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0078 - accuracy:
0.9974 - val_loss: 0.1692 - val_accuracy: 0.9737
Epoch 25/25
1500/1500 [==============================] - 4s 3ms/step - loss: 0.0084 - accuracy:
0.9971 - val_loss: 0.1647 - val_accuracy: 0.9753
In [ ]: y_prob = model.predict(X_test)
In [ ]: y_pred = y_prob.argmax(axis=1)
In [ ]: from sklearn.metrics import accuracy_score
        accuracy_score(y_test,y_pred)
Out[ ]: 0.9755
In [ ]: plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
Out[ ]: [<matplotlib.lines.Line2D at 0x7f5737a6c590>]

In [ ]: plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
Out[ ]: [<matplotlib.lines.Line2D at 0x7f5737831e50>]

In [ ]: plt.imshow(X_test[1])
Out[ ]: <matplotlib.image.AxesImage at 0x7f5736844c50>
In [ ]: model.predict(X_test[1].reshape(1,28,28)).argmax(axis=1)
Out[ ]: array([2])
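
As a small addition (not part of the original notebook), the training curves plotted earlier are easier to read with labels and a legend; a sketch assuming the history object above:

In [ ]: plt.plot(history.history['loss'], label='train loss')
        plt.plot(history.history['val_loss'], label='val loss')
        plt.plot(history.history['accuracy'], label='train acc')
        plt.plot(history.history['val_accuracy'], label='val acc')
        plt.xlabel('epoch')
        plt.legend()
        plt.show()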