Cats and Dogs

In [22]:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import os
import PIL
import pathlib
import random
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
In [4]:
#pip install keras
In [5]:
image_dir = '/home/ubuntu/Data/catsdogs/all'
image_set = pathlib.Path(image_dir)

Number of images

In [6]:
no_image = len(list(image_set.glob('*/*.jpg')))
print(no_image)
10028
In [7]:
all_image = list(image_set.glob('*/*.jpg'))
plt.figure(figsize=(12,10))
for i in range(9):
    img_no = random.randrange(0,len(all_image))
    plt.subplot(3,3,i+1)
    plt.title("Img " +str(img_no))
    plt.imshow(PIL.Image.open(str(all_image[img_no])))
    plt.axis('off')
plt.show()
In [8]:
cats= list(image_set.glob('cats/*'))
plt.figure(figsize=(12,10)) 
for i in range(9):
    cat_no = random.randrange(0,len(cats))
    plt.subplot(3,3,i+1)    
    plt.title("Cat "+str(cat_no))
    plt.imshow(PIL.Image.open(str(cats[cat_no])))
    plt.axis('off')
plt.show()
In [9]:
dogs= list(image_set.glob('dogs/*'))
plt.figure(figsize=(12,10)) 
for i in range(9):
    dog_no = random.randrange(0,len(dogs))
    plt.subplot(3,3,i+1)    
    plt.title("Dog "+str(cat_no))
    plt.imshow(PIL.Image.open(str(dogs[dog_no])))
    plt.axis("off")
plt.show()

Data set

In [10]:
batch_size = 32
img_height = 200
img_width = 200
In [11]:
# training dataset
train_img= tf.keras.preprocessing.image_dataset_from_directory(image_set,validation_split=0.2,
                                                               subset='training',seed=100,image_size = (img_height,img_width),
                                                              batch_size=batch_size)
Found 10028 files belonging to 2 classes.
Using 8023 files for training.
In [12]:
# validation dataset
valid_img= tf.keras.preprocessing.image_dataset_from_directory(image_set,validation_split=0.2,
                                                               subset='validation',seed=100,image_size = (img_height,img_width),
                                                              batch_size=batch_size)
Found 10028 files belonging to 2 classes.
Using 2005 files for validation.
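
Not part of the original notebook: as an optional aside, the usual tf.data performance pattern could be applied to both datasets before training (a sketch; on TF versions older than 2.4, tf.data.experimental.AUTOTUNE would be needed instead):

In [ ]:
# Optional performance tuning (assumption, not done in this notebook):
# cache decoded images and overlap input preparation with training.
AUTOTUNE = tf.data.AUTOTUNE
train_img = train_img.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
valid_img = valid_img.cache().prefetch(buffer_size=AUTOTUNE)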

Model

In [13]:
# check the class names
class_names = train_img.class_names
print(class_names)
['cats', 'dogs']
In [14]:
# image shape and batch size
for image_batch, labels_batch in train_img:
  print(image_batch.shape)
  print(labels_batch.shape)
  break
(32, 200, 200, 3)
(32,)
In [15]:
num_classes = 2

model = Sequential([
    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height,img_width, 3)),
    layers.Conv2D(16,3, padding = 'same', activation = 'relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32,3, padding = 'same', activation = 'relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64,3, padding = 'same', activation = 'relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation = 'relu'),
    layers.Dense(num_classes)
])

Compile

In [16]:
model.compile(optimizer='adam',loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
In [17]:
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
rescaling (Rescaling)        (None, 200, 200, 3)       0         
_________________________________________________________________
conv2d (Conv2D)              (None, 200, 200, 16)      448       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 100, 100, 16)      0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 100, 100, 32)      4640      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 50, 50, 32)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 50, 50, 64)        18496     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 25, 25, 64)        0         
_________________________________________________________________
flatten (Flatten)            (None, 40000)             0         
_________________________________________________________________
dense (Dense)                (None, 128)               5120128   
_________________________________________________________________
dense_1 (Dense)              (None, 2)                 258       
=================================================================
Total params: 5,143,970
Trainable params: 5,143,970
Non-trainable params: 0
_________________________________________________________________

Training

In [18]:
epochs= 10

history = model.fit(train_img, validation_data=valid_img,epochs=epochs)
Epoch 1/10
251/251 [==============================] - 29s 111ms/step - loss: 0.7147 - accuracy: 0.5512 - val_loss: 0.6032 - val_accuracy: 0.6753
Epoch 2/10
251/251 [==============================] - 22s 87ms/step - loss: 0.5927 - accuracy: 0.6771 - val_loss: 0.5428 - val_accuracy: 0.7257
Epoch 3/10
251/251 [==============================] - 22s 86ms/step - loss: 0.5002 - accuracy: 0.7548 - val_loss: 0.5148 - val_accuracy: 0.7387
Epoch 4/10
251/251 [==============================] - 22s 86ms/step - loss: 0.4348 - accuracy: 0.7937 - val_loss: 0.5167 - val_accuracy: 0.7481
Epoch 5/10
251/251 [==============================] - 22s 86ms/step - loss: 0.3667 - accuracy: 0.8420 - val_loss: 0.5756 - val_accuracy: 0.7616
Epoch 6/10
251/251 [==============================] - 22s 87ms/step - loss: 0.2730 - accuracy: 0.8861 - val_loss: 0.6435 - val_accuracy: 0.7596
Epoch 7/10
251/251 [==============================] - 22s 86ms/step - loss: 0.1831 - accuracy: 0.9314 - val_loss: 0.8743 - val_accuracy: 0.7536
Epoch 8/10
251/251 [==============================] - 22s 86ms/step - loss: 0.0986 - accuracy: 0.9624 - val_loss: 0.9676 - val_accuracy: 0.7611
Epoch 9/10
251/251 [==============================] - 22s 86ms/step - loss: 0.0923 - accuracy: 0.9624 - val_loss: 1.0710 - val_accuracy: 0.7446
Epoch 10/10
251/251 [==============================] - 22s 86ms/step - loss: 0.0591 - accuracy: 0.9788 - val_loss: 1.2625 - val_accuracy: 0.7456
In [19]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

#epochs_range = range(1, len(acc) + 1)

epochs_range = range(epochs)

plt.figure(figsize=(10,8))
plt.plot(epochs_range,acc,'b',label = 'Training Accuracy')
plt.plot(epochs_range, val_acc,'r',label='Validation Accuracy')
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure(figsize=(10,8))
plt.plot(epochs_range,loss,'b',label='Training loss')
plt.plot(epochs_range,val_loss,label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()
In [20]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(14, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='lower left')
plt.title('Training and Validation Loss')
plt.show()
In [44]:
test_datagen = ImageDataGenerator(rescale=1./255)
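
The test_datagen generator above is created but not used anywhere in this notebook. If a separate test directory existed, it could be wired up roughly as below (a sketch only; the test path is a hypothetical assumption, not a directory that appears in this notebook):

In [ ]:
# Sketch only: the test directory path is a hypothetical assumption.
# class_mode='sparse' yields integer labels, matching the
# SparseCategoricalCrossentropy loss used for the model.
# Note: the model below already contains a Rescaling(1./255) layer, so the
# rescale=1./255 in test_datagen would scale twice; one of the two would
# need to be dropped before using this for evaluation.
test_generator = test_datagen.flow_from_directory(
    '/home/ubuntu/Data/catsdogs/test',
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='sparse')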

Model overfitting

In the graphs above, the large difference between training and validation accuracy (and the rising validation loss) is a sign of overfitting. Data augmentation and dropout are added below to reduce it.
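
As an aside (not used in this notebook), Keras also provides an EarlyStopping callback that stops training once the validation loss stops improving; a minimal sketch:

In [ ]:
# Optional addition (assumption, not part of the original notebook):
# stop training if val_loss has not improved for 3 epochs and
# restore the best weights seen so far.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=3, restore_best_weights=True)
# It would be passed to model.fit via callbacks=[early_stop].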

In [23]:
data_augmentation = keras.Sequential(
  [
    layers.experimental.preprocessing.RandomFlip("horizontal", 
                                                 input_shape=(img_height, 
                                                              img_width,
                                                              3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
  ]
)
In [27]:
plt.figure(figsize=(10, 10))
for images, _ in train_img.take(1):
  for i in range(9):
    augmented_images = data_augmentation(images)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_images[0].numpy().astype("uint8"))
    plt.axis("off")
In [28]:
model = Sequential([
  data_augmentation,
  layers.experimental.preprocessing.Rescaling(1./255),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(),
  layers.Dropout(0.2),
  layers.Flatten(),
  layers.Dense(128, activation='relu'),
  layers.Dense(num_classes)
])
In [33]:
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
In [34]:
model.summary()
Model: "sequential_2"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
sequential_1 (Sequential)    (None, 200, 200, 3)       0         
_________________________________________________________________
rescaling_1 (Rescaling)      (None, 200, 200, 3)       0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 200, 200, 16)      448       
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 100, 100, 16)      0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 100, 100, 32)      4640      
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 50, 50, 32)        0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 50, 50, 64)        18496     
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 25, 25, 64)        0         
_________________________________________________________________
dropout (Dropout)            (None, 25, 25, 64)        0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 40000)             0         
_________________________________________________________________
dense_2 (Dense)              (None, 128)               5120128   
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 258       
=================================================================
Total params: 5,143,970
Trainable params: 5,143,970
Non-trainable params: 0
_________________________________________________________________
In [35]:
epochs = 15
history = model.fit(train_img, validation_data=valid_img, epochs=epochs)
Epoch 1/15
251/251 [==============================] - 29s 111ms/step - loss: 0.7484 - accuracy: 0.5560 - val_loss: 0.6238 - val_accuracy: 0.6843
Epoch 2/15
251/251 [==============================] - 28s 110ms/step - loss: 0.5996 - accuracy: 0.6959 - val_loss: 0.5596 - val_accuracy: 0.7072
Epoch 3/15
251/251 [==============================] - 28s 110ms/step - loss: 0.5636 - accuracy: 0.7178 - val_loss: 0.5411 - val_accuracy: 0.7232
Epoch 4/15
251/251 [==============================] - 28s 110ms/step - loss: 0.5405 - accuracy: 0.7279 - val_loss: 0.5077 - val_accuracy: 0.7431
Epoch 5/15
251/251 [==============================] - 28s 110ms/step - loss: 0.5173 - accuracy: 0.7437 - val_loss: 0.5044 - val_accuracy: 0.7382
Epoch 6/15
251/251 [==============================] - 28s 110ms/step - loss: 0.5082 - accuracy: 0.7528 - val_loss: 0.4917 - val_accuracy: 0.7556
Epoch 7/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4815 - accuracy: 0.7754 - val_loss: 0.4591 - val_accuracy: 0.7791
Epoch 8/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4729 - accuracy: 0.7779 - val_loss: 0.4390 - val_accuracy: 0.7895
Epoch 9/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4561 - accuracy: 0.7902 - val_loss: 0.4417 - val_accuracy: 0.7985
Epoch 10/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4323 - accuracy: 0.7979 - val_loss: 0.4320 - val_accuracy: 0.7925
Epoch 11/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4218 - accuracy: 0.8067 - val_loss: 0.4608 - val_accuracy: 0.7875
Epoch 12/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4206 - accuracy: 0.8081 - val_loss: 0.4130 - val_accuracy: 0.8125
Epoch 13/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4065 - accuracy: 0.8135 - val_loss: 0.4249 - val_accuracy: 0.8000
Epoch 14/15
251/251 [==============================] - 28s 110ms/step - loss: 0.4011 - accuracy: 0.8187 - val_loss: 0.4148 - val_accuracy: 0.8040
Epoch 15/15
251/251 [==============================] - 28s 110ms/step - loss: 0.3836 - accuracy: 0.8246 - val_loss: 0.4149 - val_accuracy: 0.8080
In [51]:
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

plt.figure(figsize=(14, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='lower left')
plt.title('Training and Validation Loss')
plt.show()

Not completed.
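
One remaining step would be to run the trained model on a single new image. A minimal sketch (the file name below is a hypothetical assumption, not a file from this notebook):

In [ ]:
# Sketch only: 'sample.jpg' is a hypothetical path.
# The model's own layers already handle rescaling, so raw pixels are passed in.
img = tf.keras.preprocessing.image.load_img('sample.jpg',
                                             target_size=(img_height, img_width))
img_array = tf.keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)   # build a batch of one
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])      # convert logits to probabilities
print("{} ({:.1f}% confidence)".format(class_names[np.argmax(score)],
                                       100 * np.max(score)))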
