Download Dataset¶
Part 1: Train CNN Model¶
Step 1: Import the required packages¶
In [1]:
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
Step 2: Initialising the CNN¶
In [2]:
model = Sequential()
Step 3: Convolution¶
In [3]:
model.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
C:\Users\Mehak\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead. super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Step 4: Pooling¶
In [4]:
model.add(MaxPooling2D(pool_size = (2, 2)))
Step 5: Second convolutional layer¶
In [5]:
model.add(Conv2D(32, (3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
Step 6: Flattening¶
In [6]:
model.add(Flatten())
Step 7: Full connection¶
In [7]:
model.add(Dense(units = 128, activation = 'relu'))
model.add(Dense(units = 1, activation = 'sigmoid'))
Step 8: Compiling the CNN¶
In [8]:
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
Step 9: ImageDataGenerator¶
In [9]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
Step 10: Load the training Set¶
In [10]:
training_set = train_datagen.flow_from_directory('./Dataset/training_set',
target_size = (64, 64),
batch_size = 32,
class_mode = 'binary')
Found 19 images belonging to 2 classes.
Step 11: Classifier Training¶
In [11]:
model.fit(training_set,epochs = 50)
Epoch 1/50
C:\Users\Mehak\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\trainers\data_adapters\py_dataset_adapter.py:122: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored. self._warn_if_super_not_called()
1/1 ━━━━━━━━━━━━━━━━━━━━ 3s 3s/step - accuracy: 0.6842 - loss: 0.6577 Epoch 2/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 891ms/step - accuracy: 0.6842 - loss: 1.3435 Epoch 3/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 405ms/step - accuracy: 0.6842 - loss: 0.6610 Epoch 4/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 449ms/step - accuracy: 0.3158 - loss: 0.7509 Epoch 5/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 512ms/step - accuracy: 0.3158 - loss: 0.7807 Epoch 6/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 446ms/step - accuracy: 0.3158 - loss: 0.7075 Epoch 7/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 383ms/step - accuracy: 0.6842 - loss: 0.6315 Epoch 8/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 453ms/step - accuracy: 0.6842 - loss: 0.5969 Epoch 9/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 413ms/step - accuracy: 0.6842 - loss: 0.5960 Epoch 10/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 466ms/step - accuracy: 0.6842 - loss: 0.6027 Epoch 11/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 404ms/step - accuracy: 0.6842 - loss: 0.6098 Epoch 12/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 415ms/step - accuracy: 0.6842 - loss: 0.6048 Epoch 13/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 452ms/step - accuracy: 0.6842 - loss: 0.5842 Epoch 14/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 392ms/step - accuracy: 0.6842 - loss: 0.5709 Epoch 15/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 446ms/step - accuracy: 0.6842 - loss: 0.5676 Epoch 16/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 475ms/step - accuracy: 0.6842 - loss: 0.5636 Epoch 17/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 441ms/step - accuracy: 0.6842 - loss: 0.5465 Epoch 18/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 528ms/step - accuracy: 0.6842 - loss: 0.5317 Epoch 19/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 559ms/step - accuracy: 0.6842 - loss: 0.5147 Epoch 20/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 568ms/step - accuracy: 0.6842 - loss: 0.5032 Epoch 21/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 529ms/step - accuracy: 0.7368 - loss: 0.4997 Epoch 22/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 490ms/step - accuracy: 0.7895 - loss: 0.4607 Epoch 23/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 449ms/step - accuracy: 0.7895 - loss: 0.4427 Epoch 24/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 464ms/step - accuracy: 0.7895 - loss: 0.4237 Epoch 25/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 454ms/step - accuracy: 0.8421 - loss: 0.3858 Epoch 26/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 494ms/step - accuracy: 0.8421 - loss: 0.3549 Epoch 27/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 595ms/step - accuracy: 0.7895 - loss: 0.3294 Epoch 28/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 442ms/step - accuracy: 1.0000 - loss: 0.2906 Epoch 29/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 483ms/step - accuracy: 0.9474 - loss: 0.2548 Epoch 30/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 479ms/step - accuracy: 0.9474 - loss: 0.2297 Epoch 31/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 469ms/step - accuracy: 1.0000 - loss: 0.2283 Epoch 32/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 405ms/step - accuracy: 0.9474 - loss: 0.1732 Epoch 33/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 421ms/step - accuracy: 1.0000 - loss: 0.1524 Epoch 34/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 528ms/step - accuracy: 0.9474 - loss: 0.1511 Epoch 35/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 518ms/step - accuracy: 1.0000 - loss: 0.1458 Epoch 36/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 579ms/step - accuracy: 0.9474 - loss: 0.1518 Epoch 37/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 413ms/step - accuracy: 1.0000 - loss: 0.0959 Epoch 38/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 412ms/step - accuracy: 1.0000 - loss: 0.0751 Epoch 39/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 500ms/step - accuracy: 1.0000 - loss: 0.0743 Epoch 40/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 438ms/step - accuracy: 1.0000 - loss: 0.0679 Epoch 41/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 490ms/step - accuracy: 1.0000 - loss: 0.0603 Epoch 42/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 492ms/step - accuracy: 1.0000 - loss: 0.0360 Epoch 43/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 443ms/step - accuracy: 1.0000 - loss: 0.0486 Epoch 44/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 406ms/step - accuracy: 1.0000 - loss: 0.0293 Epoch 45/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 439ms/step - accuracy: 1.0000 - loss: 0.0409 Epoch 46/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 415ms/step - accuracy: 1.0000 - loss: 0.0464 Epoch 47/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 480ms/step - accuracy: 1.0000 - loss: 0.0250 Epoch 48/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 361ms/step - accuracy: 1.0000 - loss: 0.0366 Epoch 49/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 500ms/step - accuracy: 1.0000 - loss: 0.0320 Epoch 50/50 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 433ms/step - accuracy: 1.0000 - loss: 0.0359
Out[11]:
<keras.src.callbacks.history.History at 0x16af23dcb00>
Step 12: Convert the Model to json¶
In [12]:
model_json = model.to_json()
with open("./model.json","w") as json_file:
json_file.write(model_json)
Step 13: Save the weights in a seperate file¶
In [13]:
model.save_weights("./model.weights.h5")
print("Classifier trained Successfully!")
Classifier trained Successfully!
Part 2: Test Your Trained Model¶
Step 1: Import the packages¶
In [14]:
import cv2
import numpy as np
from keras.models import model_from_json
from tensorflow.keras.preprocessing import image
Step 2: Load the Model from Json File¶
In [15]:
json_file = open('./model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
Step 3: Load the weights¶
In [16]:
loaded_model.load_weights("./model.weights.h5")
print("Loaded model from disk")
Loaded model from disk
Step 4: Compile the model¶
In [17]:
loaded_model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
Step 5: load the image you want to test¶
In [28]:
test_image = image.load_img('dataset/single_prediction/dog.jpg',target_size=(64,64))
test_image
Out[28]:
In [29]:
test_image = image.img_to_array(test_image)
test_image
Out[29]:
array([[[126., 140., 43.], [139., 150., 54.], [141., 146., 54.], ..., [ 55., 81., 16.], [ 53., 79., 18.], [ 53., 79., 18.]], [[159., 170., 68.], [172., 180., 79.], [169., 168., 75.], ..., [ 58., 88., 18.], [ 55., 84., 18.], [ 52., 81., 14.]], [[172., 178., 70.], [190., 193., 86.], [180., 189., 74.], ..., [ 64., 96., 20.], [ 60., 92., 19.], [ 57., 89., 16.]], ..., [[ 30., 43., 17.], [ 37., 54., 20.], [ 51., 76., 19.], ..., [201., 157., 96.], [172., 138., 90.], [ 81., 81., 47.]], [[ 36., 46., 19.], [ 36., 52., 16.], [ 43., 66., 14.], ..., [199., 149., 88.], [180., 143., 91.], [111., 97., 62.]], [[ 28., 38., 14.], [ 33., 48., 17.], [ 39., 61., 15.], ..., [190., 145., 86.], [184., 144., 92.], [136., 115., 70.]]], dtype=float32)
In [30]:
test_image = np.expand_dims(test_image,axis=0)
test_image
Out[30]:
array([[[[126., 140., 43.], [139., 150., 54.], [141., 146., 54.], ..., [ 55., 81., 16.], [ 53., 79., 18.], [ 53., 79., 18.]], [[159., 170., 68.], [172., 180., 79.], [169., 168., 75.], ..., [ 58., 88., 18.], [ 55., 84., 18.], [ 52., 81., 14.]], [[172., 178., 70.], [190., 193., 86.], [180., 189., 74.], ..., [ 64., 96., 20.], [ 60., 92., 19.], [ 57., 89., 16.]], ..., [[ 30., 43., 17.], [ 37., 54., 20.], [ 51., 76., 19.], ..., [201., 157., 96.], [172., 138., 90.], [ 81., 81., 47.]], [[ 36., 46., 19.], [ 36., 52., 16.], [ 43., 66., 14.], ..., [199., 149., 88.], [180., 143., 91.], [111., 97., 62.]], [[ 28., 38., 14.], [ 33., 48., 17.], [ 39., 61., 15.], ..., [190., 145., 86.], [184., 144., 92.], [136., 115., 70.]]]], dtype=float32)
In [31]:
training_set.class_indices
Out[31]:
{'cats': 0, 'dogs': 1}
Step 6: Predict to which class your input image has been classified¶
In [32]:
result = loaded_model.predict(test_image)
result
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 43ms/step
Out[32]:
array([[1.]], dtype=float32)
In [33]:
predicted_class = np.argmax(result[0]) # Get the index of the highest probability
predicted_class
Out[33]:
0
In [34]:
print(f"Predicted class index: {predicted_class}")
Predicted class index: 0
In [35]:
class_indices = training_set.class_indices
class_indices
Out[35]:
{'cats': 0, 'dogs': 1}
In [36]:
class_names = {v: k for k, v in class_indices.items()}
class_names
Out[36]:
{0: 'cats', 1: 'dogs'}
In [37]:
predicted_class_name = class_names[predicted_class]
print(f"Predicted class name: {predicted_class_name}")
Predicted class name: cats
In [38]:
if result[0][0] == 1:
print("DOG")
else:
print("CAT")
DOG