Assignment No.: 2
Name: Gayatri Rajendra Jagadale
Roll No.:2447062
Batch: D
Problem Statement –
Build a multiclass classifier using a CNN model. Use MNIST or any other suitable dataset.
a. Perform data pre-processing
b. Define the model and perform training
c. Evaluate results using a confusion matrix
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from sklearn.metrics import confusion_matrix, classification_report
# Load MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Display first 9 images from the training set
plt.figure(figsize=(6,6))
for i in range(9):
    plt.subplot(3, 3, i+1)
    plt.imshow(x_train[i], cmap='gray')
    plt.title(f"Label: {y_train[i]}")
    plt.axis('off')
plt.tight_layout()
plt.show()
# Reshape data to add channel dimension
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
# One-hot encode the labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
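As an optional sanity check (not part of the original output), the shapes and value range after pre-processing can be verified:
# Optional check: shapes and pixel range after reshaping, scaling, and one-hot encoding
print(x_train.shape)                  # expected: (60000, 28, 28, 1)
print(y_train_cat.shape)              # expected: (60000, 10)
print(x_train.min(), x_train.max())   # expected: 0.0 1.0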
model = Sequential([
    Input(shape=(28, 28, 1)),  # explicit Input layer avoids the Keras input_shape warning
    Conv2D(32, kernel_size=(3,3), activation='relu'),
    MaxPooling2D(pool_size=(2,2)),
    Conv2D(64, kernel_size=(3,3), activation='relu'),
    MaxPooling2D(pool_size=(2,2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax')  # 10 classes for MNIST
])
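To confirm the architecture, the layer output shapes and parameter counts can be printed:
# Inspect layer output shapes and parameter counts
model.summary()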
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train, y_train_cat, epochs=5, batch_size=128,
                    validation_split=0.1)
Epoch 1/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 9s 20ms/step - accuracy: 0.7977 - loss: 0.6469 - val_accuracy: 0.9817 - val_loss: 0.0636
Epoch 2/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 9s 21ms/step - accuracy: 0.9668 - loss: 0.1096 - val_accuracy: 0.9858 - val_loss: 0.0497
Epoch 3/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 8s 20ms/step - accuracy: 0.9775 - loss: 0.0743 - val_accuracy: 0.9883 - val_loss: 0.0423
Epoch 4/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 8s 20ms/step - accuracy: 0.9805 - loss: 0.0638 - val_accuracy: 0.9903 - val_loss: 0.0355
Epoch 5/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 9s 22ms/step - accuracy: 0.9852 - loss: 0.0495 - val_accuracy: 0.9905 - val_loss: 0.0347
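To visualize training progress, the accuracy and loss curves can be plotted from the history object returned by fit (an optional sketch, not part of the original run):
# Optional: plot training and validation curves recorded during fit
plt.figure(figsize=(10,4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='validation')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.title('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.tight_layout()
plt.show()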
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step
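As an optional extra check (not part of the original output), a few misclassified test digits can be displayed to see where the model fails:
# Optional: show up to 9 misclassified test images with true and predicted labels
misclassified = np.where(y_pred_classes != y_test)[0]
plt.figure(figsize=(6,6))
for i, idx in enumerate(misclassified[:9]):
    plt.subplot(3, 3, i+1)
    plt.imshow(x_test[idx].reshape(28,28), cmap='gray')
    plt.title(f"True: {y_test[idx]}, Pred: {y_pred_classes[idx]}")
    plt.axis('off')
plt.tight_layout()
plt.show()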
# Evaluate the model on test data
test_loss, test_accuracy = model.evaluate(x_test, y_test_cat, verbose=0)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
Test Accuracy: 99.10%
# Print classification report
print(classification_report(y_test, y_pred_classes))
              precision    recall  f1-score   support

           0       0.99      1.00      0.99       980
           1       1.00      0.99      0.99      1135
           2       0.99      0.99      0.99      1032
           3       0.99      0.99      0.99      1010
           4       0.99      0.99      0.99       982
           5       0.98      0.99      0.99       892
           6       1.00      0.99      0.99       958
           7       0.99      0.99      0.99      1028
           8       0.98      0.99      0.99       974
           9       0.99      0.99      0.99      1009

    accuracy                           0.99     10000
   macro avg       0.99      0.99      0.99     10000
weighted avg       0.99      0.99      0.99     10000
cm = confusion_matrix(y_test, y_pred_classes)
plt.figure(figsize=(10,8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=range(10), yticklabels=range(10))
plt.title('Confusion Matrix')
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.show()
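As a final optional step (a sketch, not part of the original run), per-class accuracy can be derived from the confusion matrix: the diagonal holds the correct predictions for each digit, and each row sum is the total number of true samples of that digit.
# Optional: per-class accuracy from the confusion matrix
per_class_acc = cm.diagonal() / cm.sum(axis=1)
for digit, acc in enumerate(per_class_acc):
    print(f"Digit {digit}: {acc * 100:.2f}% correct")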