
Skin Cancer Images - CNN - Deep Learning

Skin Cancer Image Classification (Benign vs. Malignant) - Deep Learning CNN
In [35]:
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
from scipy import stats

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.utils.np_utils import to_categorical  # used for converting labels to one-hot encoding
# import autokeras as ak

np.random.seed(42)
In [36]:
folder_benign_train = '../input/skin-cancer-malignant-vs-benign/train/benign'
folder_malignant_train = '../input/skin-cancer-malignant-vs-benign/train/malignant'

folder_benign_test = '../input/skin-cancer-malignant-vs-benign/test/benign'
folder_malignant_test = '../input/skin-cancer-malignant-vs-benign/test/malignant'

# Read an image file into an RGB numpy array
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))

# Load in training pictures
ims_benign = [read(os.path.join(folder_benign_train, filename)) for filename in os.listdir(folder_benign_train)]
X_benign = np.array(ims_benign, dtype='uint8')
ims_malignant = [read(os.path.join(folder_malignant_train, filename)) for filename in os.listdir(folder_malignant_train)]
X_malignant = np.array(ims_malignant, dtype='uint8')

# Load in testing pictures
ims_benign = [read(os.path.join(folder_benign_test, filename)) for filename in os.listdir(folder_benign_test)]
X_benign_test = np.array(ims_benign, dtype='uint8')
ims_malignant = [read(os.path.join(folder_malignant_test, filename)) for filename in os.listdir(folder_malignant_test)]
X_malignant_test = np.array(ims_malignant, dtype='uint8')

# Create labels: benign = 0, malignant = 1
Y_benign = np.zeros(X_benign.shape[0])
Y_malignant = np.ones(X_malignant.shape[0])

Y_benign_test = np.zeros(X_benign_test.shape[0])
Y_malignant_test = np.ones(X_malignant_test.shape[0])

# Merge data
X_train = np.concatenate((X_benign, X_malignant), axis=0)
Y_train = np.concatenate((Y_benign, Y_malignant), axis=0)

X_test = np.concatenate((X_benign_test, X_malignant_test), axis=0)
Y_test = np.concatenate((Y_benign_test, Y_malignant_test), axis=0)

# Shuffle data (permute images and labels with the same index so they stay aligned)
s = np.arange(X_train.shape[0])
np.random.shuffle(s)
X_train = X_train[s]
Y_train = Y_train[s]

s = np.arange(X_test.shape[0])
np.random.shuffle(s)
X_test = X_test[s]
Y_test = Y_test[s]
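Note: the images are fed to the networks below as raw uint8 intensities in [0, 255]; this notebook does not rescale them. A common preprocessing step is to map pixels to [0, 1], which often stabilizes training. A minimal sketch of that hypothetical addition (the _scaled names are illustrative, not part of the pipeline below):

# Hypothetical preprocessing step (not applied in this notebook):
# scale pixel intensities from [0, 255] to [0, 1].
X_train_scaled = X_train.astype('float32') / 255.0
X_test_scaled = X_test.astype('float32') / 255.0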
In [37]:
# Display a grid of training images with their labels
fig = plt.figure(figsize=(20, 50))
columns = 5
rows = 10

for i in range(1, columns * rows + 1):
    ax = fig.add_subplot(rows, columns, i)
    if Y_train[i] == 0:
        ax.title.set_text('Benign')
    else:
        ax.title.set_text('Malignant')
    plt.imshow(X_train[i], interpolation='nearest')
plt.show()
In [38]:
print('Shapes: \n')
print('X_train: ', X_train.shape, ' \n')
print('Y_train: ', Y_train.shape, ' \n')
print('X_test : ', X_test.shape, ' \n')
print('Y_test : ', Y_test.shape, ' \n')
print('Y_train malignant fraction: ', (Y_train.sum() / len(Y_train)).round(4), '\n')
print('Y_test malignant fraction: ', (Y_test.sum() / len(Y_test)).round(4), '\n')
Shapes:

X_train: (2637, 224, 224, 3)

Y_train: (2637,)

X_test : (660, 224, 224, 3)

Y_test : (660,)

Y_train malignant fraction:  0.4539

Y_test malignant fraction:  0.4545

In [39]:
# Subsample the data for a quick preliminary evaluation of the CNN
Sample1 = np.random.choice(range(0, len(X_train)), size=1000, replace=False)
Sample2 = np.random.choice(range(0, len(X_test)), size=600, replace=False)
Sample2
Out[39]:
array([526, 10, 469, 253, 4, 452, 384, 139, 341, 593, 193, 626, 451,
197, 466, 301, 458, 227, 332, 159, 135, 561, 454, 574, 217, 607,
254, 50, 654, 241, 101, 290, 356, 514, 132, 402, 578, 632, 123,
315, 130, 338, 334, 495, 347, 138, 390, 90, 119, 210, 615, 491,
158, 155, 508, 524, 411, 32, 419, 249, 506, 313, 658, 456, 407,
321, 258, 149, 555, 596, 476, 622, 120, 54, 414, 225, 541, 362,
389, 391, 76, 614, 641, 463, 518, 372, 203, 162, 444, 413, 370,
230, 83, 287, 144, 204, 11, 484, 549, 457, 1, 18, 95, 168,
228, 348, 355, 399, 45, 211, 277, 107, 450, 52, 43, 597, 22,
435, 589, 42, 285, 453, 294, 569, 652, 404, 363, 250, 353, 60,
520, 479, 542, 137, 166, 261, 180, 128, 316, 179, 335, 56, 118,
212, 239, 237, 141, 276, 16, 576, 349, 564, 30, 445, 378, 368,
242, 233, 505, 337, 446, 320, 382, 616, 305, 201, 582, 386, 238,
583, 19, 265, 408, 609, 546, 395, 148, 28, 247, 385, 51, 48,
643, 371, 39, 331, 610, 367, 234, 468, 551, 280, 392, 202, 303,
529, 364, 251, 289, 38, 350, 206, 525, 281, 352, 426, 264, 306,
503, 625, 325, 361, 207, 61, 299, 187, 433, 256, 422, 553, 160,
31, 619, 170, 298, 279, 124, 500, 262, 403, 629, 333, 600, 192,
70, 17, 96, 109, 359, 441, 397, 161, 455, 519, 581, 85, 394,
106, 464, 630, 184, 169, 436, 200, 388, 442, 470, 15, 633, 638,
80, 594, 131, 533, 112, 127, 475, 21, 563, 584, 543, 93, 650,
570, 406, 618, 37, 620, 412, 416, 97, 567, 292, 421, 473, 635,
538, 579, 573, 424, 46, 189, 300, 263, 485, 291, 571, 270, 26,
420, 599, 521, 568, 653, 310, 23, 510, 471, 637, 459, 115, 417,
628, 2, 345, 515, 226, 429, 186, 164, 62, 177, 460, 122, 40,
86, 171, 229, 218, 535, 282, 377, 103, 236, 448, 398, 195, 447,
199, 379, 612, 474, 326, 58, 410, 223, 558, 432, 493, 327, 601,
221, 183, 53, 640, 175, 560, 77, 472, 165, 415, 283, 114, 25,
547, 151, 173, 336, 531, 284, 548, 214, 5, 35, 33, 512, 649,
393, 172, 645, 272, 523, 82, 79, 423, 209, 71, 437, 646, 522,
405, 304, 511, 6, 99, 357, 269, 430, 552, 216, 611, 323, 360,
215, 383, 143, 317, 631, 381, 656, 586, 324, 191, 566, 617, 545,
81, 461, 185, 194, 516, 374, 105, 219, 358, 121, 477, 534, 449,
492, 182, 588, 483, 513, 88, 222, 196, 651, 205, 156, 150, 340,
319, 559, 102, 278, 59, 311, 245, 129, 198, 487, 72, 73, 602,
267, 659, 375, 163, 396, 297, 489, 266, 116, 550, 255, 365, 499,
66, 7, 104, 94, 188, 329, 496, 425, 259, 12, 92, 257, 624,
592, 78, 275, 260, 246, 68, 307, 376, 98, 142, 271, 208, 387,
153, 647, 613, 8, 544, 190, 3, 14, 478, 328, 380, 509, 572,
590, 273, 243, 504, 29, 9, 286, 540, 47, 69, 20, 44, 528,
401, 627, 644, 41, 498, 181, 24, 220, 344, 481, 577, 604, 507,
636, 152, 0, 621, 343, 240, 145, 530, 575, 354, 502, 598, 74,
642, 330, 133, 296, 497, 57, 634, 27, 400, 100, 580, 603, 111,
295, 89, 532, 108, 244, 565, 346, 339, 639, 49, 134, 434, 302,
87, 465, 312, 224, 462, 490, 157, 351, 595, 63, 64, 488, 585,
84, 213, 608, 235, 557, 655, 418, 606, 527, 309, 342, 293, 591,
167, 482])
In [40]:
X_train_sample = X_train[Sample1]
Y_train_sample = Y_train[Sample1]
X_test_sample = X_test[Sample2]
Y_test_sample = Y_test[Sample2]
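The subsample above is drawn uniformly at random, so its class balance can drift slightly from the full set's. If balance matters, a stratified draw is an option; a sketch using sklearn's train_test_split (already imported), where the _strat names are illustrative and this cell is not part of the notebook's actual pipeline:

# Illustrative alternative (not used below): a stratified subsample that
# preserves the benign/malignant ratio of the full training set.
_, X_train_sample_strat, _, Y_train_sample_strat = train_test_split(
    X_train, Y_train,
    test_size=1000,        # keep 1000 images, matching Sample1
    stratify=Y_train,      # match the full set's class proportions
    random_state=42)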
In [41]:
X_train_sample.shape
Out[41]:
(1000, 224, 224, 3)
In [42]:
Y_train_sample.shape
Out[42]:
(1000,)
In [43]:
# Preliminary model, to be evaluated on the subsample
num_classes = 1
SIZE = 224  # image side length in pixels
kern_size = 3

# Other options to try: padding="same", kernel_initializer="glorot_uniform"

model = Sequential()
model.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation="relu", input_shape=(SIZE, SIZE, 3)))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.3))

model.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.2))

model.add(Flatten())

model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
# model.add(Dense(num_classes, activation='softmax'))  # for multiclass problems
model.add(Dense(1, activation='sigmoid'))  # single sigmoid unit for binary output
model.summary()

# For multiclass problems:
# model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['acc'])

model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=["BinaryAccuracy"])
Model: "sequential_12"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_36 (Conv2D) (None, 222, 222, 20) 560
_________________________________________________________________
max_pooling2d_36 (MaxPooling (None, 111, 111, 20) 0
_________________________________________________________________
dropout_70 (Dropout) (None, 111, 111, 20) 0
_________________________________________________________________
conv2d_37 (Conv2D) (None, 109, 109, 20) 3620
_________________________________________________________________
max_pooling2d_37 (MaxPooling (None, 54, 54, 20) 0
_________________________________________________________________
dropout_71 (Dropout) (None, 54, 54, 20) 0
_________________________________________________________________
conv2d_38 (Conv2D) (None, 52, 52, 20) 3620
_________________________________________________________________
max_pooling2d_38 (MaxPooling (None, 26, 26, 20) 0
_________________________________________________________________
dropout_72 (Dropout) (None, 26, 26, 20) 0
_________________________________________________________________
flatten_12 (Flatten) (None, 13520) 0
_________________________________________________________________
dense_46 (Dense) (None, 128) 1730688
_________________________________________________________________
dense_47 (Dense) (None, 128) 16512
_________________________________________________________________
dense_48 (Dense) (None, 1) 129
=================================================================
Total params: 1,755,129
Trainable params: 1,755,129
Non-trainable params: 0
_________________________________________________________________
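As a sanity check on the summary, the parameter counts follow from the layer shapes: a Conv2D layer has (kernel_height * kernel_width * input_channels + 1) * filters parameters (the +1 is the bias), and a Dense layer has (inputs + 1) * units. A quick sketch reproducing the table's totals:

# Reproduce the parameter counts reported by model.summary()
conv1 = (3 * 3 * 3 + 1) * 20    # 560: 3x3 kernels over 3 input channels, 20 filters
conv2 = (3 * 3 * 20 + 1) * 20   # 3620: same for each of the two later conv layers
dense1 = (13520 + 1) * 128      # 1730688: Flatten yields 26 * 26 * 20 = 13520 inputs
dense2 = (128 + 1) * 128        # 16512
out = (128 + 1) * 1             # 129
print(conv1 + 2 * conv2 + dense1 + dense2 + out)  # 1755129, matching "Total params: 1,755,129"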
In [44]:
# Evaluate the model on the subsample
batch_size = 20
epochs = 50

history = model.fit(
    X_train_sample, Y_train_sample,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(X_test_sample, Y_test_sample),
    verbose=2)
Epoch 1/50
50/50 - 2s - loss: 16.2853 - binary_accuracy: 0.5710 - val_loss: 0.6924 - val_binary_accuracy: 0.5917
Epoch 2/50
50/50 - 1s - loss: 0.6559 - binary_accuracy: 0.7040 - val_loss: 0.5696 - val_binary_accuracy: 0.6633
Epoch 3/50
50/50 - 1s - loss: 0.5425 - binary_accuracy: 0.7360 - val_loss: 0.5298 - val_binary_accuracy: 0.7067
Epoch 4/50
50/50 - 1s - loss: 0.5149 - binary_accuracy: 0.7500 - val_loss: 0.5289 - val_binary_accuracy: 0.7450
Epoch 5/50
50/50 - 1s - loss: 0.4646 - binary_accuracy: 0.7610 - val_loss: 0.5281 - val_binary_accuracy: 0.7700
Epoch 6/50
50/50 - 1s - loss: 0.4552 - binary_accuracy: 0.7630 - val_loss: 0.5171 - val_binary_accuracy: 0.7583
Epoch 7/50
50/50 - 1s - loss: 0.4415 - binary_accuracy: 0.7710 - val_loss: 0.5207 - val_binary_accuracy: 0.7417
Epoch 8/50
50/50 - 1s - loss: 0.4342 - binary_accuracy: 0.7720 - val_loss: 0.5232 - val_binary_accuracy: 0.7467
Epoch 9/50
50/50 - 1s - loss: 0.4105 - binary_accuracy: 0.7910 - val_loss: 0.5483 - val_binary_accuracy: 0.7083
Epoch 10/50
50/50 - 1s - loss: 0.4528 - binary_accuracy: 0.7690 - val_loss: 0.5532 - val_binary_accuracy: 0.7350
Epoch 11/50
50/50 - 1s - loss: 0.4218 - binary_accuracy: 0.7880 - val_loss: 0.5311 - val_binary_accuracy: 0.7217
Epoch 12/50
50/50 - 1s - loss: 0.3873 - binary_accuracy: 0.8070 - val_loss: 0.5023 - val_binary_accuracy: 0.7417
Epoch 13/50
50/50 - 1s - loss: 0.4029 - binary_accuracy: 0.8050 - val_loss: 0.5249 - val_binary_accuracy: 0.7417
Epoch 14/50
50/50 - 1s - loss: 0.4037 - binary_accuracy: 0.8030 - val_loss: 0.4989 - val_binary_accuracy: 0.7533
Epoch 15/50
50/50 - 1s - loss: 0.3640 - binary_accuracy: 0.8230 - val_loss: 0.4880 - val_binary_accuracy: 0.7433
Epoch 16/50
50/50 - 1s - loss: 0.3717 - binary_accuracy: 0.8200 - val_loss: 0.4942 - val_binary_accuracy: 0.7517
Epoch 17/50
50/50 - 1s - loss: 0.3576 - binary_accuracy: 0.8230 - val_loss: 0.5009 - val_binary_accuracy: 0.7500
Epoch 18/50
50/50 - 1s - loss: 0.3379 - binary_accuracy: 0.8510 - val_loss: 0.5130 - val_binary_accuracy: 0.7433
Epoch 19/50
50/50 - 1s - loss: 0.3440 - binary_accuracy: 0.8390 - val_loss: 0.5240 - val_binary_accuracy: 0.7267
Epoch 20/50
50/50 - 1s - loss: 0.3500 - binary_accuracy: 0.8300 - val_loss: 0.5331 - val_binary_accuracy: 0.7267
Epoch 21/50
50/50 - 1s - loss: 0.2951 - binary_accuracy: 0.8630 - val_loss: 0.4772 - val_binary_accuracy: 0.7550
Epoch 22/50
50/50 - 1s - loss: 0.3144 - binary_accuracy: 0.8420 - val_loss: 0.4954 - val_binary_accuracy: 0.7633
Epoch 23/50
50/50 - 1s - loss: 0.3104 - binary_accuracy: 0.8510 - val_loss: 0.4899 - val_binary_accuracy: 0.7433
Epoch 24/50
50/50 - 1s - loss: 0.3233 - binary_accuracy: 0.8400 - val_loss: 0.4977 - val_binary_accuracy: 0.7483
Epoch 25/50
50/50 - 1s - loss: 0.2545 - binary_accuracy: 0.8900 - val_loss: 0.5405 - val_binary_accuracy: 0.7683
Epoch 26/50
50/50 - 1s - loss: 0.2542 - binary_accuracy: 0.8800 - val_loss: 0.4964 - val_binary_accuracy: 0.7633
Epoch 27/50
50/50 - 1s - loss: 0.3353 - binary_accuracy: 0.8500 - val_loss: 0.4566 - val_binary_accuracy: 0.7617
Epoch 28/50
50/50 - 1s - loss: 0.2732 - binary_accuracy: 0.8830 - val_loss: 0.5254 - val_binary_accuracy: 0.7317
Epoch 29/50
50/50 - 1s - loss: 0.2670 - binary_accuracy: 0.8790 - val_loss: 0.4790 - val_binary_accuracy: 0.7583
Epoch 30/50
50/50 - 1s - loss: 0.2779 - binary_accuracy: 0.8740 - val_loss: 0.4651 - val_binary_accuracy: 0.7633
Epoch 31/50
50/50 - 1s - loss: 0.2458 - binary_accuracy: 0.8930 - val_loss: 0.5301 - val_binary_accuracy: 0.7383
Epoch 32/50
50/50 - 1s - loss: 0.2931 - binary_accuracy: 0.8770 - val_loss: 0.4853 - val_binary_accuracy: 0.7533
Epoch 33/50
50/50 - 1s - loss: 0.2645 - binary_accuracy: 0.8820 - val_loss: 0.4599 - val_binary_accuracy: 0.7867
Epoch 34/50
50/50 - 1s - loss: 0.2570 - binary_accuracy: 0.8850 - val_loss: 0.4952 - val_binary_accuracy: 0.7667
Epoch 35/50
50/50 - 1s - loss: 0.2336 - binary_accuracy: 0.8970 - val_loss: 0.5156 - val_binary_accuracy: 0.7783
Epoch 36/50
50/50 - 1s - loss: 0.2350 - binary_accuracy: 0.9040 - val_loss: 0.5012 - val_binary_accuracy: 0.7683
Epoch 37/50
50/50 - 1s - loss: 0.2132 - binary_accuracy: 0.9070 - val_loss: 0.4801 - val_binary_accuracy: 0.7617
Epoch 38/50
50/50 - 1s - loss: 0.1709 - binary_accuracy: 0.9270 - val_loss: 0.5315 - val_binary_accuracy: 0.7417
Epoch 39/50
50/50 - 1s - loss: 0.1632 - binary_accuracy: 0.9420 - val_loss: 0.5391 - val_binary_accuracy: 0.7983
Epoch 40/50
50/50 - 1s - loss: 0.3641 - binary_accuracy: 0.8630 - val_loss: 0.7082 - val_binary_accuracy: 0.4783
Epoch 41/50
50/50 - 1s - loss: 0.3093 - binary_accuracy: 0.8710 - val_loss: 0.5619 - val_binary_accuracy: 0.7650
Epoch 42/50
50/50 - 1s - loss: 0.1860 - binary_accuracy: 0.9180 - val_loss: 0.6206 - val_binary_accuracy: 0.7617
Epoch 43/50
50/50 - 1s - loss: 0.1555 - binary_accuracy: 0.9380 - val_loss: 0.7354 - val_binary_accuracy: 0.7867
Epoch 44/50
50/50 - 1s - loss: 0.1418 - binary_accuracy: 0.9440 - val_loss: 0.5907 - val_binary_accuracy: 0.7850
Epoch 45/50
50/50 - 1s - loss: 0.2711 - binary_accuracy: 0.9150 - val_loss: 0.7097 - val_binary_accuracy: 0.7267
Epoch 46/50
50/50 - 1s - loss: 0.4890 - binary_accuracy: 0.7680 - val_loss: 0.5914 - val_binary_accuracy: 0.7100
Epoch 47/50
50/50 - 1s - loss: 0.3303 - binary_accuracy: 0.8340 - val_loss: 0.8053 - val_binary_accuracy: 0.6517
Epoch 48/50
50/50 - 1s - loss: 0.2848 - binary_accuracy: 0.8810 - val_loss: 0.5834 - val_binary_accuracy: 0.7550
Epoch 49/50
50/50 - 1s - loss: 0.2311 - binary_accuracy: 0.9070 - val_loss: 0.5315 - val_binary_accuracy: 0.7717
Epoch 50/50
50/50 - 1s - loss: 0.1587 - binary_accuracy: 0.9330 - val_loss: 0.6092 - val_binary_accuracy: 0.7750
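Training accuracy climbs toward 0.93 while validation accuracy plateaus around 0.75-0.80 and the run turns unstable late (see epoch 40), which suggests overfitting. One way to pick the stopping point automatically rather than by eyeballing the curves is an EarlyStopping callback; this sketch is a possible extension, not something this notebook does:

from keras.callbacks import EarlyStopping

# Stop once val_loss has not improved for 5 epochs and keep the best weights
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# history = model.fit(X_train_sample, Y_train_sample, epochs=50, batch_size=20,
#                     validation_data=(X_test_sample, Y_test_sample),
#                     callbacks=[early_stop], verbose=2)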
In [45]:
plt.plot(history.history['binary_accuracy'])
plt.plot(history.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

Model 2
In [46]:
# Model 2: same architecture as the preliminary model, to be trained on the full dataset
num_classes = 1
SIZE = 224  # image side length in pixels
kern_size = 3

# Other options to try: padding="same", kernel_initializer="glorot_uniform"

model2 = Sequential()
model2.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation="relu", input_shape=(SIZE, SIZE, 3)))
model2.add(MaxPool2D(pool_size=(2, 2)))
model2.add(Dropout(0.3))

model2.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model2.add(MaxPool2D(pool_size=(2, 2)))
model2.add(Dropout(0.3))

model2.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model2.add(MaxPool2D(pool_size=(2, 2)))
model2.add(Dropout(0.2))

model2.add(Flatten())

model2.add(Dense(128, activation='relu'))
model2.add(Dense(128, activation='relu'))
# model2.add(Dense(num_classes, activation='softmax'))  # for multiclass problems
model2.add(Dense(1, activation='sigmoid'))
model2.summary()

# For multiclass problems:
# model2.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['acc'])

model2.compile(loss='binary_crossentropy', optimizer='Adam', metrics=["BinaryAccuracy"])
Model: "sequential_13"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_39 (Conv2D) (None, 222, 222, 20) 560
_________________________________________________________________
max_pooling2d_39 (MaxPooling (None, 111, 111, 20) 0
_________________________________________________________________
dropout_73 (Dropout) (None, 111, 111, 20) 0
_________________________________________________________________
conv2d_40 (Conv2D) (None, 109, 109, 20) 3620
_________________________________________________________________
max_pooling2d_40 (MaxPooling (None, 54, 54, 20) 0
_________________________________________________________________
dropout_74 (Dropout) (None, 54, 54, 20) 0
_________________________________________________________________
conv2d_41 (Conv2D) (None, 52, 52, 20) 3620
_________________________________________________________________
max_pooling2d_41 (MaxPooling (None, 26, 26, 20) 0
_________________________________________________________________
dropout_75 (Dropout) (None, 26, 26, 20) 0
_________________________________________________________________
flatten_13 (Flatten) (None, 13520) 0
_________________________________________________________________
dense_49 (Dense) (None, 128) 1730688
_________________________________________________________________
dense_50 (Dense) (None, 128) 16512
_________________________________________________________________
dense_51 (Dense) (None, 1) 129
=================================================================
Total params: 1,755,129
Trainable params: 1,755,129
Non-trainable params: 0
_________________________________________________________________
In [47]:
# Fine-tuning: train the model on the full dataset
batch_size = 20
epochs = 19

history2 = model2.fit(
    X_train, Y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(X_test, Y_test),
    verbose=2)
Epoch 1/19
132/132 - 4s - loss: 12.7344 - binary_accuracy: 0.5904 - val_loss: 0.6947 - val_binary_accuracy: 0.4545
Epoch 2/19
132/132 - 3s - loss: 0.6393 - binary_accuracy: 0.6325 - val_loss: 0.6972 - val_binary_accuracy: 0.4545
Epoch 3/19
132/132 - 3s - loss: 0.5762 - binary_accuracy: 0.6815 - val_loss: 0.6971 - val_binary_accuracy: 0.4833
Epoch 4/19
132/132 - 3s - loss: 0.5277 - binary_accuracy: 0.7345 - val_loss: 0.6414 - val_binary_accuracy: 0.6288
Epoch 5/19
132/132 - 3s - loss: 0.5238 - binary_accuracy: 0.7368 - val_loss: 0.6749 - val_binary_accuracy: 0.5561
Epoch 6/19
132/132 - 3s - loss: 0.5090 - binary_accuracy: 0.7527 - val_loss: 0.5420 - val_binary_accuracy: 0.7348
Epoch 7/19
132/132 - 3s - loss: 0.4848 - binary_accuracy: 0.7535 - val_loss: 0.5406 - val_binary_accuracy: 0.7303
Epoch 8/19
132/132 - 3s - loss: 0.4838 - binary_accuracy: 0.7505 - val_loss: 0.5034 - val_binary_accuracy: 0.7424
Epoch 9/19
132/132 - 3s - loss: 0.4653 - binary_accuracy: 0.7653 - val_loss: 0.5160 - val_binary_accuracy: 0.7409
Epoch 10/19
132/132 - 3s - loss: 0.4577 - binary_accuracy: 0.7675 - val_loss: 0.4967 - val_binary_accuracy: 0.7455
Epoch 11/19
132/132 - 3s - loss: 0.4385 - binary_accuracy: 0.7808 - val_loss: 0.4731 - val_binary_accuracy: 0.7667
Epoch 12/19
132/132 - 3s - loss: 0.4365 - binary_accuracy: 0.7763 - val_loss: 0.5244 - val_binary_accuracy: 0.7136
Epoch 13/19
132/132 - 3s - loss: 0.4402 - binary_accuracy: 0.7793 - val_loss: 0.4847 - val_binary_accuracy: 0.7545
Epoch 14/19
132/132 - 3s - loss: 0.4266 - binary_accuracy: 0.7888 - val_loss: 0.5176 - val_binary_accuracy: 0.7227
Epoch 15/19
132/132 - 3s - loss: 0.4175 - binary_accuracy: 0.7895 - val_loss: 0.5451 - val_binary_accuracy: 0.7258
Epoch 16/19
132/132 - 3s - loss: 0.3999 - binary_accuracy: 0.7956 - val_loss: 0.4612 - val_binary_accuracy: 0.7576
Epoch 17/19
132/132 - 3s - loss: 0.3860 - binary_accuracy: 0.7990 - val_loss: 0.4369 - val_binary_accuracy: 0.7803
Epoch 18/19
132/132 - 3s - loss: 0.3671 - binary_accuracy: 0.8100 - val_loss: 0.5177 - val_binary_accuracy: 0.7212
Epoch 19/19
132/132 - 3s - loss: 0.3667 - binary_accuracy: 0.8161 - val_loss: 0.4502 - val_binary_accuracy: 0.7561
In [48]:
plt.plot(history2.history['binary_accuracy'])
plt.plot(history2.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history2.history['loss'])
plt.plot(history2.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
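The curves summarize training, but classification_report and confusion_matrix are imported at the top of the notebook and never used. A sketch of how they could score model2's test predictions by thresholding the sigmoid outputs at 0.5; an illustrative extra cell, not part of the original run:

# Hard benign/malignant predictions from the sigmoid probabilities
Y_pred = (model2.predict(X_test) > 0.5).astype('int32').ravel()
print(confusion_matrix(Y_test, Y_pred))
print(classification_report(Y_test, Y_pred, target_names=['benign', 'malignant']))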

Model 3
In [49]:
# Model 3: wider first conv layer, smaller dense head, dropout after every block
num_classes = 1
SIZE = 224  # image side length in pixels
kern_size = 3

# Other options to try: padding="same", kernel_initializer="glorot_uniform"

model3 = Sequential()
model3.add(Conv2D(filters=50, kernel_size=(kern_size, kern_size), activation="relu", input_shape=(SIZE, SIZE, 3)))
model3.add(MaxPool2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))

model3.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model3.add(MaxPool2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))

model3.add(Conv2D(filters=20, kernel_size=(kern_size, kern_size), activation='relu'))
model3.add(MaxPool2D(pool_size=(2, 2)))
model3.add(Dropout(0.3))

model3.add(Flatten())

model3.add(Dense(100, activation='relu'))
model3.add(Dropout(0.3))
model3.add(Dense(50, activation='relu'))
model3.add(Dropout(0.3))

# model3.add(Dense(num_classes, activation='softmax'))  # for multiclass problems
model3.add(Dense(1, activation='sigmoid'))
model3.summary()

# For multiclass problems:
# model3.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['acc'])

model3.compile(loss='binary_crossentropy', optimizer='Adam', metrics=["BinaryAccuracy"])
Model: "sequential_14"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_42 (Conv2D) (None, 222, 222, 50) 1400
_________________________________________________________________
max_pooling2d_42 (MaxPooling (None, 111, 111, 50) 0
_________________________________________________________________
dropout_76 (Dropout) (None, 111, 111, 50) 0
_________________________________________________________________
conv2d_43 (Conv2D) (None, 109, 109, 20) 9020
_________________________________________________________________
max_pooling2d_43 (MaxPooling (None, 54, 54, 20) 0
_________________________________________________________________
dropout_77 (Dropout) (None, 54, 54, 20) 0
_________________________________________________________________
conv2d_44 (Conv2D) (None, 52, 52, 20) 3620
_________________________________________________________________
max_pooling2d_44 (MaxPooling (None, 26, 26, 20) 0
_________________________________________________________________
dropout_78 (Dropout) (None, 26, 26, 20) 0
_________________________________________________________________
flatten_14 (Flatten) (None, 13520) 0
_________________________________________________________________
dense_52 (Dense) (None, 100) 1352100
_________________________________________________________________
dropout_79 (Dropout) (None, 100) 0
_________________________________________________________________
dense_53 (Dense) (None, 50) 5050
_________________________________________________________________
dropout_80 (Dropout) (None, 50) 0
_________________________________________________________________
dense_54 (Dense) (None, 1) 51
=================================================================
Total params: 1,371,241
Trainable params: 1,371,241
Non-trainable params: 0
_________________________________________________________________
In [50]:
# Fine-tuning: train the model on the full dataset
batch_size = 50
epochs = 20

history3 = model3.fit(
    X_train, Y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(X_test, Y_test),
    verbose=2)
Epoch 1/20
53/53 - 7s - loss: 6.8687 - binary_accuracy: 0.5036 - val_loss: 0.6928 - val_binary_accuracy: 0.5500
Epoch 2/20
53/53 - 6s - loss: 0.6989 - binary_accuracy: 0.5283 - val_loss: 0.6921 - val_binary_accuracy: 0.5470
Epoch 3/20
53/53 - 6s - loss: 0.6943 - binary_accuracy: 0.5218 - val_loss: 0.6916 - val_binary_accuracy: 0.5470
Epoch 4/20
53/53 - 6s - loss: 0.6869 - binary_accuracy: 0.5381 - val_loss: 0.6920 - val_binary_accuracy: 0.5470
Epoch 5/20
53/53 - 6s - loss: 0.6720 - binary_accuracy: 0.5518 - val_loss: 0.6931 - val_binary_accuracy: 0.4500
Epoch 6/20
53/53 - 6s - loss: 0.6324 - binary_accuracy: 0.5897 - val_loss: 0.6932 - val_binary_accuracy: 0.4727
Epoch 7/20
53/53 - 6s - loss: 0.5720 - binary_accuracy: 0.6796 - val_loss: 0.5559 - val_binary_accuracy: 0.7273
Epoch 8/20
53/53 - 6s - loss: 0.5924 - binary_accuracy: 0.6735 - val_loss: 0.5779 - val_binary_accuracy: 0.6985
Epoch 9/20
53/53 - 6s - loss: 0.5427 - binary_accuracy: 0.7084 - val_loss: 0.5069 - val_binary_accuracy: 0.7515
Epoch 10/20
53/53 - 6s - loss: 0.5174 - binary_accuracy: 0.7376 - val_loss: 0.4952 - val_binary_accuracy: 0.7530
Epoch 11/20
53/53 - 6s - loss: 0.5160 - binary_accuracy: 0.7463 - val_loss: 0.4930 - val_binary_accuracy: 0.7530
Epoch 12/20
53/53 - 6s - loss: 0.5002 - binary_accuracy: 0.7562 - val_loss: 0.4923 - val_binary_accuracy: 0.7682
Epoch 13/20
53/53 - 6s - loss: 0.5270 - binary_accuracy: 0.7399 - val_loss: 0.5008 - val_binary_accuracy: 0.7424
Epoch 14/20
53/53 - 6s - loss: 0.4954 - binary_accuracy: 0.7524 - val_loss: 0.4911 - val_binary_accuracy: 0.7530
Epoch 15/20
53/53 - 6s - loss: 0.4667 - binary_accuracy: 0.7710 - val_loss: 0.4827 - val_binary_accuracy: 0.7561
Epoch 16/20
53/53 - 6s - loss: 0.5032 - binary_accuracy: 0.7531 - val_loss: 0.5278 - val_binary_accuracy: 0.7318
Epoch 17/20
53/53 - 6s - loss: 0.4827 - binary_accuracy: 0.7600 - val_loss: 0.5072 - val_binary_accuracy: 0.7394
Epoch 18/20
53/53 - 6s - loss: 0.4968 - binary_accuracy: 0.7440 - val_loss: 0.4786 - val_binary_accuracy: 0.7439
Epoch 19/20
53/53 - 6s - loss: 0.4655 - binary_accuracy: 0.7664 - val_loss: 0.4787 - val_binary_accuracy: 0.7470
Epoch 20/20
53/53 - 6s - loss: 0.4577 - binary_accuracy: 0.7713 - val_loss: 0.4730 - val_binary_accuracy: 0.7652
In [51]:
plt.plot(history3.history['binary_accuracy'])
plt.plot(history3.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history3.history['loss'])
plt.plot(history3.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
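With all three models trained, a natural closing step (not shown in this notebook) is scoring a single image. A minimal sketch, assuming model3 and the same 224x224 RGB format the models were trained on:

# Score one test image: the sigmoid output is the estimated probability
# that the lesion is malignant (label 1).
img = X_test[0][np.newaxis, ...]   # add a batch dimension -> (1, 224, 224, 3)
prob_malignant = float(model3.predict(img)[0, 0])
print('Malignant' if prob_malignant > 0.5 else 'Benign', prob_malignant)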
