Download as pdf or txt
Download as pdf or txt
You are on page 1of 4

21/03/2024, 21:20 Untitled17.ipynb - Colaboratory

# Mount Google Drive into the Colab VM so the snake-image dataset under
# /content/drive is readable (Colab-only; prompts for authorization).
from google.colab import drive


drive.mount('/content/drive')

Mounted at /content/drive

Double-click (or enter) to edit

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D

Start coding or generate with AI.

Start coding or generate with AI.

Define data directories


# --- Dataset locations and input-pipeline constants -------------------
# Directories on the mounted Google Drive holding the train/test splits.
train_data_dir = "/content/drive/MyDrive/data/Snake Images/train"
test_data_dir = "/content/drive/MyDrive/data/Snake Images/test"

# Images are resized to 224x224, MobileNetV2's standard input resolution.
img_height = 224
img_width = 224

# Number of images per training/evaluation batch.
batch_size = 32

# Use ImageDataGenerator to load the data and perform data augmentation


# Training-time augmentation: pixel values scaled into [0, 1], random
# shears/zooms and horizontal flips; 20% of the training folder is
# reserved as a validation split.
_train_augmentation = dict(
    rescale=1.0 / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2,
)
train_datagen = ImageDataGenerator(**_train_augmentation)

# Test images are only rescaled -- no augmentation at evaluation time.
test_datagen = ImageDataGenerator(rescale=1.0 / 255)

# Stream the training split from disk: images resized and batched, with
# one-hot ("categorical") labels inferred from the directory structure.
train_generator = train_datagen.flow_from_directory(
    directory=train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training',
)

Found 1420 images belonging to 2 classes.

# Stream the held-out 20% validation split from the same training folder.
validation_generator = train_datagen.flow_from_directory(
    directory=train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation',
)

Found 355 images belonging to 2 classes.

# Stream the test split (rescale only, no augmentation, no subset split).
test_generator = test_datagen.flow_from_directory(
    directory=test_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
)

Found 269 images belonging to 2 classes.

https://colab.research.google.com/drive/107zbX3NrHW8LuMC7Zgs8IwvE7euH5MLE#scrollTo=tI0Tb9kHSdDb&printMode=true 1/4
21/03/2024, 21:20 Untitled17.ipynb - Colaboratory
# MobileNetV2 backbone pre-trained on ImageNet, without its classifier
# head (include_top=False), sized for our 224x224 RGB inputs.
base_model = MobileNetV2(
    weights='imagenet',
    include_top=False,
    input_shape=(img_height, img_width, 3),
)

# Freeze the backbone so only the newly attached head is trained.
for frozen_layer in base_model.layers:
    frozen_layer.trainable = False

Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering


9406464/9406464 [==============================] - 0s 0us/step

# Classification head on top of the frozen backbone: global average
# pooling collapses the feature maps to a vector, a 256-unit ReLU layer
# follows, and a softmax layer sized to the number of classes discovered
# by the training generator produces the class probabilities.
model = Sequential()
model.add(base_model)
model.add(GlobalAveragePooling2D())
model.add(Dense(256, activation='relu'))
model.add(Dense(train_generator.num_classes, activation='softmax'))

# Compile the model


# Compile: Adam optimizer, categorical cross-entropy over the one-hot
# labels, and accuracy as the reported metric.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the head for 10 epochs. Capture the History return value (the
# original discarded it) so per-epoch accuracy/loss curves are available
# without retraining the model later.
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    epochs=10)

Epoch 1/10
44/44 [==============================] - 599s 14s/step - loss: 0.6070 - accuracy: 0.7147 - val_loss: 0.4611 - val_accuracy: 0.7699
Epoch 2/10
44/44 [==============================] - 31s 710ms/step - loss: 0.3612 - accuracy: 0.8393 - val_loss: 0.3795 - val_accuracy: 0.8125
Epoch 3/10
44/44 [==============================] - 27s 623ms/step - loss: 0.3163 - accuracy: 0.8682 - val_loss: 0.4045 - val_accuracy: 0.7955
Epoch 4/10
44/44 [==============================] - 32s 731ms/step - loss: 0.2540 - accuracy: 0.9020 - val_loss: 0.5126 - val_accuracy: 0.7784
Epoch 5/10
44/44 [==============================] - 31s 703ms/step - loss: 0.2177 - accuracy: 0.9157 - val_loss: 0.4270 - val_accuracy: 0.8125
Epoch 6/10
44/44 [==============================] - 32s 729ms/step - loss: 0.1940 - accuracy: 0.9215 - val_loss: 0.3852 - val_accuracy: 0.8381
Epoch 7/10
44/44 [==============================] - 31s 702ms/step - loss: 0.1530 - accuracy: 0.9517 - val_loss: 0.4259 - val_accuracy: 0.8210
Epoch 8/10
44/44 [==============================] - 27s 617ms/step - loss: 0.1236 - accuracy: 0.9604 - val_loss: 0.4063 - val_accuracy: 0.8267
Epoch 9/10
44/44 [==============================] - 32s 730ms/step - loss: 0.1145 - accuracy: 0.9669 - val_loss: 0.4502 - val_accuracy: 0.8210
Epoch 10/10
44/44 [==============================] - 27s 611ms/step - loss: 0.1025 - accuracy: 0.9640 - val_loss: 0.4388 - val_accuracy: 0.8324
<keras.src.callbacks.History at 0x7d795ffaa830>

# Measure generalization on the held-out test directory.
evaluation = model.evaluate(test_generator, verbose=2)
test_loss, test_acc = evaluation
print('\nTest accuracy:', test_acc)

9/9 - 93s - loss: 0.3970 - accuracy: 0.8290 - 93s/epoch - 10s/step

Test accuracy: 0.8289963006973267

# NOTE(review): this second fit() call trains the ALREADY-trained model
# for 10 MORE epochs (20 cumulative) solely to obtain a History object.
# Capturing the History returned by the first fit() call would avoid
# this duplicate training run.


history = model.fit(
train_generator,
steps_per_epoch=train_generator.samples // batch_size,
validation_data=validation_generator,
validation_steps=validation_generator.samples // batch_size,
epochs=10)

# Final-epoch training accuracy taken from the recorded history.


training_accuracy = history.history['accuracy'][-1]
print('\nTraining accuracy:', training_accuracy)

Epoch 1/10
44/44 [==============================] - 29s 659ms/step - loss: 0.0835 - accuracy: 0.9769 - val_loss: 0.4295 - val_accuracy: 0.8352
Epoch 2/10
44/44 [==============================] - 28s 634ms/step - loss: 0.0804 - accuracy: 0.9748 - val_loss: 0.6007 - val_accuracy: 0.7756
Epoch 3/10
44/44 [==============================] - 27s 612ms/step - loss: 0.0659 - accuracy: 0.9863 - val_loss: 0.5312 - val_accuracy: 0.8125
Epoch 4/10

https://colab.research.google.com/drive/107zbX3NrHW8LuMC7Zgs8IwvE7euH5MLE#scrollTo=tI0Tb9kHSdDb&printMode=true 2/4
21/03/2024, 21:20 Untitled17.ipynb - Colaboratory
44/44 [==============================] - 32s 725ms/step - loss: 0.0661 - accuracy: 0.9805 - val_loss: 0.4647 - val_accuracy: 0.8125
Epoch 5/10
44/44 [==============================] - 33s 765ms/step - loss: 0.0722 - accuracy: 0.9748 - val_loss: 0.6306 - val_accuracy: 0.7699
Epoch 6/10
44/44 [==============================] - 28s 640ms/step - loss: 0.0458 - accuracy: 0.9878 - val_loss: 0.4580 - val_accuracy: 0.8239
Epoch 7/10
44/44 [==============================] - 32s 731ms/step - loss: 0.0349 - accuracy: 0.9935 - val_loss: 0.5271 - val_accuracy: 0.8068
Epoch 8/10
44/44 [==============================] - 33s 759ms/step - loss: 0.0336 - accuracy: 0.9921 - val_loss: 0.5683 - val_accuracy: 0.7983
Epoch 9/10
44/44 [==============================] - 33s 753ms/step - loss: 0.0336 - accuracy: 0.9928 - val_loss: 0.4786 - val_accuracy: 0.8324
Epoch 10/10
44/44 [==============================] - 32s 729ms/step - loss: 0.0341 - accuracy: 0.9906 - val_loss: 0.5153 - val_accuracy: 0.8068

Training accuracy: 0.990634024143219

import matplotlib.pyplot as plt

# Pull one augmented batch of (images, one-hot labels) from the
# training generator.
images, labels = next(train_generator)

# Display the first nine images with their labels in a 3x3 grid.
# The original looped over range(19) into a 6x6 grid despite its
# "first nine" comment, and would raise IndexError on a batch shorter
# than 19 images; cap the count at the actual batch length instead.
num_to_show = min(9, len(images))
plt.figure(figsize=(20, 20))
for i in range(num_to_show):
    plt.subplot(3, 3, i + 1)
    plt.imshow(images[i])
    plt.title(f'Label: {labels[i]}')
    plt.axis('off')
plt.show()

https://colab.research.google.com/drive/107zbX3NrHW8LuMC7Zgs8IwvE7euH5MLE#scrollTo=tI0Tb9kHSdDb&printMode=true 3/4
21/03/2024, 21:20 Untitled17.ipynb - Colaboratory

https://colab.research.google.com/drive/107zbX3NrHW8LuMC7Zgs8IwvE7euH5MLE#scrollTo=tI0Tb9kHSdDb&printMode=true 4/4

You might also like