Potato Disease Classification Using CNN

In [2]: # import data from Kaggle into Google Colab
        # a few setup commands are needed
        #!mkdir -p ~/.kaggle
        #!cp kaggle.json ~/.kaggle/
        # Kaggle dataset API
        #!kaggle datasets download -d arjuntejaswi/plant-village

In [3]: # the download is a zip file; extract it to a normal folder
        #import zipfile
        #zip_ref = zipfile.ZipFile('/content/plant-village.zip', 'r')
        #zip_ref.extractall('/content')
        #zip_ref.close()

In [4]: # import libraries
        import warnings
        warnings.filterwarnings('ignore')

        import tensorflow as tf
        from tensorflow.keras.preprocessing.image import ImageDataGenerator
        from tensorflow.keras import Sequential
        from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D

        import numpy as np
        import os
        import matplotlib.pyplot as plt
        %matplotlib inline

In [5]: # some important variables
        batch_size = 32
        image_size = 256
        channels = 3
        epochs = 50

In [6]: dataset = tf.keras.preprocessing.image_dataset_from_directory(
            '/content/drive/MyDrive/CNN_Poteto_project/PlantVillage',
            shuffle=True,
            image_size=(image_size, image_size),
            batch_size=batch_size
        )

        Found 2152 files belonging to 3 classes.

In [7]: # class names
        class_names = dataset.class_names
        class_names

Out[7]: ['Potato___Early_blight', 'Potato___Late_blight', 'Potato___healthy']

In [8]: dataset

Out[8]: <_BatchDataset element_spec=(TensorSpec(shape=(None, 256, 256, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>

In [9]: # batch_size = 32, so one batch holds 32 training images
        # take only one sample (i.e. one batch)
        dataset.take(1)

Out[9]: <_TakeDataset element_spec=(TensorSpec(shape=(None, 256, 256, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>

In [10]: # inspect the first batch: 32 images of shape (256, 256, 3)
         # the printed labels, e.g. array([1, 1, 0, 2, ...]), index into the 3 classes
         # ('Potato___Early_blight' -> 0, 'Potato___Late_blight' -> 1, 'Potato___healthy' -> 2)
         for element in dataset.take(1):
             print(element)

In [12]: # train/validation/test split
         # train data: len(dataset)*0.8 -> 68*0.8 = 54.4
         # validation data: len(dataset)*0.1 -> 68*0.1 = 6.8
         # test data: the remainder
         # (train data + validation data + test data) = 1

In [16]: train_size = 0.8
         len(dataset)*train_size

Out[16]: 54.400000000000006

In [17]: # the first 54 batches (0-53) all go into train_ds
         train_ds = dataset.take(54)
         len(train_ds)

Out[17]: 54

In [18]: # everything above 54 (68 - 54 = 14 batches) becomes test_ds for now
         test_ds = dataset.skip(54)
         len(test_ds)

Out[18]: 14

In [19]: val_size = 0.1
         len(dataset)*0.1

Out[19]: 6.800000000000001
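For reference, the 68 batches implied by these calculations come straight from the directory scan: 2152 images at a batch size of 32 give ceil(2152 / 32) = 68 batches. A minimal sketch of that arithmetic (the variable names here are illustrative, not from the notebook):

import math

num_images = 2152                 # reported by image_dataset_from_directory above
batch_size = 32

num_batches = math.ceil(num_images / batch_size)
print(num_batches)                # 68
print(num_batches * 0.8)          # 54.4 -> int() gives the 54 train batches
print(num_batches * 0.1)          # 6.8  -> int() gives the 6 validation batches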
In [20]: # val_ds: the first 6 of those batches
         val_ds = test_ds.take(6)
         len(val_ds)

Out[20]: 6

In [21]: # test_ds: the remaining 8 batches
         test_ds = test_ds.skip(6)
         len(test_ds)

Out[21]: 8

In [23]: # a reusable function that produces train_ds, val_ds, test_ds
         def train_val_test_split(ds, train_split=0.8, test_split=0.1, val_split=0.1,
                                  shuffle=True, shuffle_size=10000):
             assert (train_split + test_split + val_split) == 1

             ds_size = len(ds)

             if shuffle:
                 ds = ds.shuffle(shuffle_size, seed=12)

             train_size = int(ds_size * train_split)
             val_size = int(ds_size * val_split)

             train_ds = ds.take(train_size)
             val_ds = ds.skip(train_size).take(val_size)
             test_ds = ds.skip(train_size).skip(val_size)

             return train_ds, val_ds, test_ds

         #train_ds, val_ds, test_ds = train_val_test_split(dataset)

In [24]: print('train_ds_size:', len(train_ds))
         print('val_ds_size:', len(val_ds))
         print('test_ds_size:', len(test_ds))

         train_ds_size: 54
         val_ds_size: 6
         test_ds_size: 8

In [25]: # increase the input-pipeline performance
         train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
         val_ds = val_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
         test_ds = test_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)

In [26]: '''The tf.data.Dataset.cache() method caches the dataset in memory so that
         it can be accessed more quickly in the future; this is useful for datasets
         that are large or accessed frequently. The tf.data.Dataset.shuffle() method
         randomizes the order of the data, which helps when training models that are
         sensitive to data order. The tf.data.Dataset.prefetch() method fetches the
         next batch while the current one is being processed, reducing the time the
         model waits for data. Here we chain cache(), shuffle(), and prefetch() to
         speed up the training process.'''

In [27]: for image_batch, labels_batch in dataset.take(1):
             print(image_batch[2].shape)

         (256, 256, 3)

In [28]: image_size = 256

         # resize the images to (256, 256, 3) and rescale pixel values by 1/255;
         # these become the first layers of the CNN architecture
         resize_and_rescale = tf.keras.Sequential([
             tf.keras.layers.experimental.preprocessing.Resizing(image_size, image_size),
             tf.keras.layers.experimental.preprocessing.Rescaling(1.0/255)
         ])

In [29]: # data augmentation
         data_augmentation = tf.keras.Sequential([
             tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal_and_vertical'),
             tf.keras.layers.experimental.preprocessing.RandomRotation(0.3)
         ])

In [30]: input_shape = (batch_size, image_size, image_size, channels)
         n_classes = 3
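Before building the model, it can help to sanity-check the preprocessing pipeline. The sketch below is illustrative only (the dummy tensor is made up); it passes a fake batch through resize_and_rescale and confirms the output shape and [0, 1] value range, then shows that data_augmentation is stochastic when called with training=True:

import numpy as np
import tensorflow as tf

# made-up batch of 4 random 300x300 "images"
dummy = tf.constant(np.random.randint(0, 256, size=(4, 300, 300, 3)).astype('float32'))

out = resize_and_rescale(dummy)
print(out.shape)                                              # (4, 256, 256, 3)
print(float(tf.reduce_min(out)), float(tf.reduce_max(out)))   # both within [0.0, 1.0]

# augmentation layers are random, so two passes over the same input differ
aug1 = data_augmentation(out, training=True)
aug2 = data_augmentation(out, training=True)
print(bool(tf.reduce_any(aug1 != aug2)))                      # almost certainly True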
In [31]: # CNN architecture
         model = Sequential()

         # Layer 1: resize_and_rescale
         model.add(resize_and_rescale)
         # Layer 2: data augmentation
         model.add(data_augmentation)
         # Layer 3: Conv2D
         model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape))
         # Layer 4: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 5: Conv2D
         model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
         # Layer 6: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 7: Conv2D
         model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
         # Layer 8: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 9: Conv2D
         model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
         # Layer 10: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 11: Conv2D
         model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
         # Layer 12: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 13: Conv2D
         model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
         # Layer 14: MaxPooling layer
         model.add(MaxPooling2D(2,2))
         # Layer 15: Flatten layer
         model.add(Flatten())
         # Layer 16: fully connected layer
         model.add(Dense(64, activation='relu'))
         # Layer 17: output layer
         model.add(Dense(n_classes, activation='softmax'))

In [32]: model.build(input_shape=input_shape)

In [33]: model.summary()

         Model: "sequential_2"
         _________________________________________________________________
         Layer (type)                    Output Shape           Param #
         =================================================================
         sequential (Sequential)         (32, 256, 256, 3)      0
         sequential_1 (Sequential)       (32, 256, 256, 3)      0
         conv2d (Conv2D)                 (32, 254, 254, 32)     896
         max_pooling2d (MaxPooling2D)    (32, 127, 127, 32)     0
         conv2d_1 (Conv2D)               (32, 125, 125, 64)     18496
         max_pooling2d_1 (MaxPooling2D)  (32, 62, 62, 64)       0
         conv2d_2 (Conv2D)               (32, 60, 60, 64)       36928
         max_pooling2d_2 (MaxPooling2D)  (32, 30, 30, 64)       0
         conv2d_3 (Conv2D)               (32, 28, 28, 64)       36928
         max_pooling2d_3 (MaxPooling2D)  (32, 14, 14, 64)       0
         conv2d_4 (Conv2D)               (32, 12, 12, 64)       36928
         max_pooling2d_4 (MaxPooling2D)  (32, 6, 6, 64)         0
         conv2d_5 (Conv2D)               (32, 4, 4, 64)         36928
         max_pooling2d_5 (MaxPooling2D)  (32, 2, 2, 64)         0
         flatten (Flatten)               (32, 256)              0
         dense (Dense)                   (32, 64)               16448
         dense_1 (Dense)                 (32, 3)                195
         =================================================================
         Total params: 183,747
         Trainable params: 183,747
         Non-trainable params: 0
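The Param # column can be verified by hand: a Conv2D layer holds (kernel_h * kernel_w * in_channels + 1) * out_channels parameters (the +1 is one bias per filter), and a Dense layer holds in_features * out_features + out_features. A small check against the table above (the helper names are ours, not Keras API):

def conv2d_params(k, c_in, c_out):
    # (kernel area * input channels + 1 bias) per output filter
    return (k * k * c_in + 1) * c_out

def dense_params(n_in, n_out):
    # weight matrix plus biases
    return n_in * n_out + n_out

print(conv2d_params(3, 3, 32))    # 896    -> conv2d
print(conv2d_params(3, 32, 64))   # 18496  -> conv2d_1
print(conv2d_params(3, 64, 64))   # 36928  -> conv2d_2 through conv2d_5
print(dense_params(256, 64))      # 16448  -> dense
print(dense_params(64, 3))        # 195    -> dense_1

total = (conv2d_params(3, 3, 32) + conv2d_params(3, 32, 64)
         + 4 * conv2d_params(3, 64, 64)
         + dense_params(256, 64) + dense_params(64, 3))
print(total)                      # 183747, matching Total params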
In [34]: model.compile(
             optimizer='adam',
             loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
             metrics=['accuracy']
         )

In [35]: # fit the model
         result = model.fit(
             train_ds,
             batch_size=batch_size,
             validation_data=val_ds,
             verbose=1,
             epochs=50
         )

         Epoch 1/50
         54/54 [==============================] - 162s 376ms/step - loss: 0.8184 - accuracy: 0.6053 - val_loss: 0.5714 - val_accuracy: 0.7760
         Epoch 2/50
         54/54 [==============================] - 3s 63ms/step - loss: 0.5364 - accuracy: 0.7841 - val_loss: 0.3758 - val_accuracy: 0.8594
         Epoch 3/50
         54/54 [==============================] - 3s 64ms/step - loss: 0.4192 - accuracy: 0.8247 - val_loss: 0.3452 - val_accuracy: 0.8385
         Epoch 4/50
         54/54 [==============================] - 3s 64ms/step - loss: 0.3602 - accuracy: 0.8524 - val_loss: 0.3413 - val_accuracy: 0.8385
         Epoch 5/50
         54/54 [==============================] - 3s 63ms/step - loss: 0.1881 - accuracy: 0.9277 - val_loss: 0.3639 - val_accuracy: 0.8594
         Epoch 6/50
         54/54 [==============================] - 3s 63ms/step - loss: 0.1654 - accuracy: 0.9387 - val_loss: 0.5126 - val_accuracy: 0.8021
         Epoch 7/50
         ...

In [36]: # apply the test data
         score = model.evaluate(test_ds)
         score

         8/8 [==============================] - 5s 63ms/step - loss: 0.0621 - accuracy: 0.9698

Out[36]: [0.06209729984402657, 0.9698275923728943]

In [37]: result.params

Out[37]: {'verbose': 1, 'epochs': 50, 'steps': 54}

In [38]: result.history.keys()

Out[38]: dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])

In [44]: print(result.history['accuracy'], '\n',
               result.history['val_accuracy'], '\n',
               result.history['loss'], '\n',
               result.history['val_loss'])

         [Output: four lists of 50 per-epoch values. Training accuracy climbs
         from 0.6053 to about 0.99, validation accuracy from 0.7760 to about
         0.98, training loss falls from 0.8184 to roughly 0.01-0.05, and
         validation loss from 0.5714 to roughly 0.05, with occasional spikes.]
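Because result.history stores one value per epoch, the best validation epoch is easy to locate. A minimal sketch, assuming the result object returned by model.fit above:

import numpy as np

val_acc = result.history['val_accuracy']
best = int(np.argmax(val_acc))
print(f'best epoch: {best + 1}, val_accuracy: {val_acc[best]:.4f}')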
In [45]: accuracy = result.history['accuracy']
         val_accuracy = result.history['val_accuracy']
         loss = result.history['loss']
         val_loss = result.history['val_loss']

In [47]: plt.figure(figsize=(8, 8))

         plt.subplot(1, 2, 1)
         plt.plot(range(epochs), accuracy, label='Training Accuracy')
         plt.plot(range(epochs), val_accuracy, label='Validation Accuracy')
         plt.legend(loc='lower right')
         plt.title('Training and Validation Accuracy')

         plt.subplot(1, 2, 2)
         plt.plot(range(epochs), loss, label='Training Loss')
         plt.plot(range(epochs), val_loss, label='Validation Loss')
         plt.legend(loc='upper right')
         plt.title('Training and Validation Loss')

         plt.show()

         [Figure: two side-by-side plots, "Training and Validation Accuracy" and
         "Training and Validation Loss", with both accuracy curves rising toward
         1.0 and both loss curves falling toward 0 over the 50 epochs.]

In [52]: for image_batch, label_batch in test_ds.take(1):
             print(image_batch[2])
             print('\n')
             print(label_batch[0])

         tf.Tensor(
         [[[147. 131. 142.]
           [146. 130. 141.]
           [143. 127. 138.]
           ...
           [154. 149. 146.]]], shape=(256, 256, 3), dtype=float32)

         tf.Tensor(1, shape=(), dtype=int32)
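A single accuracy number hides which classes get mistaken for which. The sketch below is an optional extra (not in the original notebook) that tallies a confusion matrix over the whole test set with tf.math.confusion_matrix:

import numpy as np
import tensorflow as tf

y_true, y_pred = [], []
for images, labels in test_ds:
    probs = model.predict(images, verbose=0)
    y_true.extend(labels.numpy())
    y_pred.extend(np.argmax(probs, axis=1))

# rows = actual class, columns = predicted class, ordered as in class_names
print(tf.math.confusion_matrix(y_true, y_pred).numpy())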
In [55]: import numpy as np

         for images_batch, labels_batch in test_ds.take(1):
             first_image = images_batch[0].numpy().astype('uint8')
             first_label = labels_batch[0].numpy()

             print('first image to predict')
             plt.imshow(first_image)
             print('actual label:', class_names[first_label])

             batch_prediction = model.predict(images_batch)
             print('predicted label:', class_names[np.argmax(batch_prediction[0])])

         1/1 [==============================] - 0s 42ms/step
         first image to predict
         actual label: Potato___Late_blight
         predicted label: Potato___Late_blight

         [Figure: the first test image rendered with plt.imshow.]

In [56]: # prediction helper
         def predict(model, img):
             img_array = tf.keras.preprocessing.image.img_to_array(img)
             img_array = tf.expand_dims(img_array, 0)

             predictions = model.predict(img_array)

             predicted_class = class_names[np.argmax(predictions[0])]
             confidence = round(100 * (np.max(predictions[0])), 2)
             return predicted_class, confidence

In [57]: plt.figure(figsize=(15, 15))
         for images, labels in test_ds.take(1):
             for i in range(9):
                 ax = plt.subplot(3, 3, i + 1)
                 plt.imshow(images[i].numpy().astype('uint8'))

                 predicted_class, confidence = predict(model, images[i].numpy())
                 actual_class = class_names[labels[i]]

                 plt.title(f'Actual: {actual_class},\n Predicted: {predicted_class}.\n Confidence: {confidence}%')
                 plt.axis('off')

         [Figure: a 3x3 grid of test images, each titled with its actual class,
         predicted class, and confidence; the predictions match the actual labels,
         with confidences between roughly 94% and 100%.]

In [ ]: # save the model
        import os
        model.save('/content/drive/MyDrive/CNN_Poteto_project/Model')

        WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op, _jit_compiled_convolution_op while saving (showing 5 of 6). These functions will not be directly callable after loading.
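model.save() wrote a TensorFlow SavedModel directory (the warning about untraced functions is expected and harmless for inference). A minimal sketch of restoring the model later, assuming the same Drive path:

import tensorflow as tf

loaded = tf.keras.models.load_model('/content/drive/MyDrive/CNN_Poteto_project/Model')
loaded.evaluate(test_ds)   # should reproduce roughly the same test score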
