import sys
print(sys.version)
3.12.5 (tags/v3.12.5:ff3bc82, Aug 6 2024, 20:45:27) [MSC v.1940 64 bit (AMD64)]
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('../datasets/bank.csv')
X = dataset.iloc[:, 3:13].values  # columns 3-12: CreditScore through EstimatedSalary
y = dataset.iloc[:, 13].values    # column 13: Exited (the churn label)
dataset
| | RowNumber | CustomerId | Surname | CreditScore | Geography | Gender | Age | Tenure | Balance | NumOfProducts | HasCrCard | IsActiveMember | EstimatedSalary | Exited |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1 | 15634602 | Hargrave | 619 | France | Female | 42 | 2 | 0.00 | 1 | 1 | 1 | 101348.88 | 1 |
| 1 | 2 | 15647311 | Hill | 608 | Spain | Female | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 | 0 |
| 2 | 3 | 15619304 | Onio | 502 | France | Female | 42 | 8 | 159660.80 | 3 | 1 | 0 | 113931.57 | 1 |
| 3 | 4 | 15701354 | Boni | 699 | France | Female | 39 | 1 | 0.00 | 2 | 0 | 0 | 93826.63 | 0 |
| 4 | 5 | 15737888 | Mitchell | 850 | Spain | Female | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.10 | 0 |
| … | … | … | … | … | … | … | … | … | … | … | … | … | … | … |
| 9995 | 9996 | 15606229 | Obijiaku | 771 | France | Male | 39 | 5 | 0.00 | 2 | 1 | 0 | 96270.64 | 0 |
| 9996 | 9997 | 15569892 | Johnstone | 516 | France | Male | 35 | 10 | 57369.61 | 1 | 1 | 1 | 101699.77 | 0 |
| 9997 | 9998 | 15584532 | Liu | 709 | France | Female | 36 | 7 | 0.00 | 1 | 0 | 1 | 42085.58 | 1 |
| 9998 | 9999 | 15682355 | Sabbatini | 772 | Germany | Male | 42 | 3 | 75075.31 | 2 | 1 | 0 | 92888.52 | 1 |
| 9999 | 10000 | 15628319 | Walker | 792 | France | Female | 28 | 4 | 130142.79 | 1 | 1 | 0 | 38190.78 | 0 |

10000 rows × 14 columns
X
array([[619, 'France', 'Female', ..., 1, 1, 101348.88],
       [608, 'Spain', 'Female', ..., 0, 1, 112542.58],
       [502, 'France', 'Female', ..., 1, 0, 113931.57],
       ...,
       [709, 'France', 'Female', ..., 0, 1, 42085.58],
       [772, 'Germany', 'Male', ..., 1, 0, 92888.52],
       [792, 'France', 'Female', ..., 1, 0, 38190.78]], dtype=object)
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])  # Geography: France=0, Germany=1, Spain=2
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])  # Gender: Female=0, Male=1
X
array([[619, 0, 0, ..., 1, 1, 101348.88],
       [608, 2, 0, ..., 0, 1, 112542.58],
       [502, 0, 0, ..., 1, 0, 113931.57],
       ...,
       [709, 0, 0, ..., 0, 1, 42085.58],
       [772, 1, 1, ..., 1, 0, 92888.52],
       [792, 0, 0, ..., 1, 0, 38190.78]], dtype=object)
# One-hot encode the integer-encoded Geography column (OneHotEncoder was imported above)
oneX = OneHotEncoder(sparse_output=False)
onehot_encoded = oneX.fit_transform(X[:, 1].reshape(-1, 1))
print(onehot_encoded)
[[1. 0. 0.]
 [0. 0. 1.]
 [1. 0. 0.]
 ...
 [1. 0. 0.]
 [0. 1. 0.]
 [1. 0. 0.]]
X = np.hstack((onehot_encoded, X))  # prepend the three Geography dummy columns (France, Germany, Spain)
X
array([[1.0, 0.0, 0.0, ..., 1, 1, 101348.88],
       [0.0, 0.0, 1.0, ..., 0, 1, 112542.58],
       [1.0, 0.0, 0.0, ..., 1, 0, 113931.57],
       ...,
       [1.0, 0.0, 0.0, ..., 0, 1, 42085.58],
       [0.0, 1.0, 0.0, ..., 1, 0, 92888.52],
       [1.0, 0.0, 0.0, ..., 1, 0, 38190.78]], dtype=object)
X = np.delete(X, 4, 1)  # drop column 4 along axis=1: the integer-encoded Geography column, now redundant with the France/Germany/Spain dummies
pd.DataFrame(X)
| | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1.0 | 0.0 | 0.0 | 619 | 0 | 42 | 2 | 0.0 | 1 | 1 | 1 | 101348.88 |
| 1 | 0.0 | 0.0 | 1.0 | 608 | 0 | 41 | 1 | 83807.86 | 1 | 0 | 1 | 112542.58 |
| 2 | 1.0 | 0.0 | 0.0 | 502 | 0 | 42 | 8 | 159660.8 | 3 | 1 | 0 | 113931.57 |
| 3 | 1.0 | 0.0 | 0.0 | 699 | 0 | 39 | 1 | 0.0 | 2 | 0 | 0 | 93826.63 |
| 4 | 0.0 | 0.0 | 1.0 | 850 | 0 | 43 | 2 | 125510.82 | 1 | 1 | 1 | 79084.1 |
| … | … | … | … | … | … | … | … | … | … | … | … | … |
| 9995 | 1.0 | 0.0 | 0.0 | 771 | 1 | 39 | 5 | 0.0 | 2 | 1 | 0 | 96270.64 |
| 9996 | 1.0 | 0.0 | 0.0 | 516 | 1 | 35 | 10 | 57369.61 | 1 | 1 | 1 | 101699.77 |
| 9997 | 1.0 | 0.0 | 0.0 | 709 | 0 | 36 | 7 | 0.0 | 1 | 0 | 1 | 42085.58 |
| 9998 | 0.0 | 1.0 | 0.0 | 772 | 1 | 42 | 3 | 75075.31 | 2 | 1 | 0 | 92888.52 |
| 9999 | 1.0 | 0.0 | 0.0 | 792 | 0 | 28 | 4 | 130142.79 | 1 | 1 | 0 | 38190.78 |

10000 rows × 12 columns
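As an aside, the label-encode / one-hot / delete sequence above can be collapsed into a single step with scikit-learn's ColumnTransformer. A minimal sketch under the same column layout (ct and X_alt are illustrative names, and the transformed columns come out first, so the column order differs from the manual route):

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

# in the raw feature slice, column 1 is Geography and column 2 is Gender
ct = ColumnTransformer(
    transformers=[
        ('geo', OneHotEncoder(sparse_output=False), [1]),                    # three dummy columns
        ('gen', OneHotEncoder(drop='if_binary', sparse_output=False), [2]),  # single 0/1 column
    ],
    remainder='passthrough')  # keep the numeric columns unchanged
X_alt = ct.fit_transform(dataset.iloc[:, 3:13].values)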
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)  # fit the scaler on the training set only
X_test = sc.transform(X_test)        # reuse the training statistics to avoid data leakage
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
- output_dim = 6: the number of nodes in the hidden layer. Why 6? A common rule of thumb is (input nodes + output nodes) / 2, e.g. (11 + 1) / 2 = 6.
- init = 'uniform': initializes the weights from a uniform distribution.
- activation = 'relu': the rectifier activation function.
In older Keras versions:

classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 12))

changes in current Keras to:

classifier.add(Dense(6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 12))
# Adding the input layer and the first hidden layer
classifier.add(Dense(6, kernel_initializer ='uniform', activation = 'relu', input_dim = 12))
C:\Users\Mehak\AppData\Local\Programs\Python\Python312\Lib\site-packages\keras\src\layers\core\dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
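Following the warning's suggestion, the warning-free form would start the model from an explicit Input layer. A sketch (this notebook keeps the input_dim form above; model is an illustrative name):

from keras.layers import Input

model = Sequential()
model.add(Input(shape=(12,)))  # declare the 12 input features up front
model.add(Dense(6, kernel_initializer='uniform', activation='relu'))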
# Adding the second hidden layer
classifier.add(Dense(6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(1, kernel_initializer = 'uniform', activation = 'sigmoid'))
How Does Adam Work?
Adam differs from classical stochastic gradient descent. Stochastic gradient descent maintains a single learning rate (termed alpha) for all weight updates, and that learning rate does not change during training. Adam instead keeps a separate, adaptive learning rate for each weight, derived from running estimates of the first moment (mean) and second moment (uncentered variance) of the gradients.
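As a rough illustration (not Keras's internal implementation), here is a single Adam update step in NumPy, using the default hyperparameters from the original paper; adam_step and its arguments are illustrative names:

def adam_step(w, grad, m, v, t, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam update for weights w given gradient grad.
    m, v are the running first/second moment estimates; t is the step count (1-based)."""
    m = beta1 * m + (1 - beta1) * grad              # update biased first moment
    v = beta2 * v + (1 - beta2) * grad ** 2         # update biased second moment
    m_hat = m / (1 - beta1 ** t)                    # bias-correct the estimates
    v_hat = v / (1 - beta2 ** t)
    w = w - alpha * m_hat / (np.sqrt(v_hat) + eps)  # per-parameter step size
    return w, m, v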
- loss = 'binary_crossentropy': because the output is either 1 or 0
- loss = 'categorical_crossentropy': if there are more than 2 categories
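In the multi-class case, the output layer would also need one unit per class and a softmax activation, with one-hot encoded targets. A hypothetical three-class variant, for contrast with the binary model built here:

classifier.add(Dense(3, kernel_initializer = 'uniform', activation = 'softmax'))  # one unit per class
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])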
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
batch_size = 10: the number of observations after which the weights are updated.

An epoch is a single pass through the entire training set while training a model: in one epoch, every training sample is presented to the model once, so the total number of epochs gives the number of cycles through the training dataset.
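With 10,000 rows and test_size = 0.2, the training set holds 8,000 samples, so a batch size of 10 means 8000 / 10 = 800 weight updates per epoch, which is exactly the 800/800 step counter in the log below.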
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs=100)
Epoch 1/100
800/800 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - accuracy: 0.7890 - loss: 0.5594
Epoch 2/100
800/800 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.8005 - loss: 0.4292
Epoch 3/100
800/800 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.8041 - loss: 0.4167
...
Epoch 99/100
800/800 ━━━━━━━━━━━━━━━━━━━━ 3s 3ms/step - accuracy: 0.8393 - loss: 0.3886
Epoch 100/100
800/800 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - accuracy: 0.8348 - loss: 0.3926
<keras.src.callbacks.history.History at 0x277980890d0>
# Part 3 - Making the predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
print(y_pred)
63/63 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step
[[0.24961196]
 [0.34071332]
 [0.20098516]
 ...
 [0.2331133 ]
 [0.13070899]
 [0.15654936]]
y_pred = (y_pred > 0.6)  # convert predicted churn probabilities to class labels at a 0.6 cutoff
y_pred
array([[False],
       [False],
       [False],
       ...,
       [False],
       [False],
       [False]])
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
array([[1565,   30],
       [ 290,  115]], dtype=int64)
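The headline metrics can be read straight off this matrix. A small sketch, using the numbers from the output above:

tn, fp, fn, tp = cm.ravel()       # 1565, 30, 290, 115
accuracy = (tp + tn) / cm.sum()   # (115 + 1565) / 2000 = 0.84
precision = tp / (tp + fp)        # 115 / 145 ≈ 0.79
recall = tp / (tp + fn)           # 115 / 405 ≈ 0.28
print(accuracy, precision, recall)

Note how the 0.6 cutoff trades recall for precision on the churn class: most predicted churners really do churn, but most actual churners are missed.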
score = classifier.evaluate(X_test, y_test, verbose=0)
print(score)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
[0.39345479011535645, 0.8420000076293945]
Test loss: 0.39345479011535645
Test accuracy: 0.8420000076293945
classifier.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                         ┃ Output Shape                ┃         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ dense (Dense)                        │ (None, 6)                   │              78 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_1 (Dense)                      │ (None, 6)                   │              42 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_2 (Dense)                      │ (None, 1)                   │               7 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 383 (1.50 KB)
Trainable params: 127 (508.00 B)
Non-trainable params: 0 (0.00 B)
Optimizer params: 256 (1.00 KB)
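These counts follow directly from the layer shapes: the first hidden layer has 12 × 6 weights + 6 biases = 78 parameters, the second 6 × 6 + 6 = 42, and the output layer 6 × 1 + 1 = 7, giving the 127 trainable parameters. Adam then stores two moment estimates per trainable parameter (2 × 127 = 254), which, together with a few scalar bookkeeping variables, accounts for the 256 optimizer parameters.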