Practice - DL - Ipynb - Colaboratory

The document presents a set of deep learning lab experiments in a Colab notebook: a multilayer perceptron and a convolutional network for MNIST handwritten digit classification, image classification with a pretrained VGG16, word-level one-hot encoding of text, and binary and multiclass text classification on the IMDB and Reuters datasets. Each experiment loads its dataset, builds a Keras model, trains it, and evaluates the result on test data.


Experiment 1: Aim: Multilayer perceptron algorithm for MNIST handwritten digit classification

import numpy as np
from keras.datasets import mnist
import matplotlib.pyplot as plt

(train_img, train_lab), (test_img, test_lab) = mnist.load_data()

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz


11490434/11490434 [==============================] - 0s 0us/step

for i in range(1, 21):
    plt.subplot(4, 5, i)
    plt.imshow(train_img[i], cmap='gray_r')
    plt.title("Digit: {}".format(train_lab[i]))
    plt.subplots_adjust(hspace=0.5)
    plt.axis("off")

print(train_img.shape, test_img.shape)

(60000, 28, 28) (10000, 28, 28)

train_img = train_img / 255.0
test_img = test_img / 255.0

from keras.models import Sequential
from keras.layers import Dense, Flatten

model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))
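As a quick check on the architecture, the layer sizes fix the parameter counts. A minimal sketch of the arithmetic (the totals below are implied by the layers above; model.summary() should report the same):

# Dense parameters = inputs * units + units (bias terms)
# Flatten:    28 * 28 = 784 inputs, 0 parameters
# Dense(512): 784 * 512 + 512 = 401,920
# Dense(512): 512 * 512 + 512 = 262,656
# Dense(10):  512 * 10  + 10  =   5,130
# total trainable parameters:   669,706
model.summary()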

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
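sparse_categorical_crossentropy is used because the MNIST labels are plain integers; categorical_crossentropy would instead require one-hot targets. A small illustration with hypothetical labels:

y_sparse = np.array([5, 0, 4])        # integer labels, as mnist.load_data() returns them
y_onehot = np.zeros((3, 10))          # the one-hot form categorical_crossentropy expects
y_onehot[np.arange(3), y_sparse] = 1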

model.fit(train_img, train_lab, epochs=3)

Epoch 1/3
1875/1875 [==============================] - 12s 6ms/step - loss: 0.1820 - accuracy: 0.9437
Epoch 2/3
1875/1875 [==============================] - 14s 7ms/step - loss: 0.0814 - accuracy: 0.9745
Epoch 3/3
1875/1875 [==============================] - 12s 6ms/step - loss: 0.0572 - accuracy: 0.9821
<keras.callbacks.History at 0x7fafa5fd49d0>

loss_acc = model.evaluate(test_img, test_lab, verbose=2)

313/313 - 1s - loss: 0.0782 - accuracy: 0.9775 - 594ms/epoch - 2ms/step

print(loss_acc[0], loss_acc[1])

0.07816269248723984 0.9775000214576721

plt.imshow(test_img[2], cmap='gray_r')
plt.title("Actual value {}".format(test_lab[2]))
plt.axis('off')
predict = model.predict(test_img)
print("predicted value: ", np.argmax(predict[2]))

313/313 [==============================] - 1s 2ms/step


predicted value: 1
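Since predict was run over the entire test set, the same array can be reduced to an overall accuracy; a minimal sketch using the variables above:

pred_labels = np.argmax(predict, axis=1)                    # most probable class per image
print("test accuracy:", np.mean(pred_labels == test_lab))   # should agree with model.evaluate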

Experiment 5: Aim: Convolutional neural network for MNIST handwritten digit classification

import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Flatten, Dense, MaxPooling2D, Dropout, Conv2D
from keras.datasets import mnist

(train_img, train_lab), (test_img, test_lab) = mnist.load_data()
# Conv2D expects a trailing channel axis; if fit rejects the input shape,
# add it with train_img = train_img[..., None] (likewise for test_img)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
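Tracing tensor shapes through the stack is a useful sanity check; a sketch of the arithmetic, assuming the default 'valid' padding and stride 1:

# input:              28 x 28 x 1
# Conv2D(32, 3x3):    26 x 26 x 32   (28 - 3 + 1 = 26)
# Conv2D(64, 3x3):    24 x 24 x 64
# MaxPooling2D(2x2):  12 x 12 x 64
# Flatten:            12 * 12 * 64 = 9216 features feeding Dense(128)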

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

model.fit(train_img, train_lab, epochs=2)

Epoch 1/2
1875/1875 [==============================] - 115s 61ms/step - loss: 0.4081 - accuracy: 0.9020
Epoch 2/2
1875/1875 [==============================] - 116s 62ms/step - loss: 0.1403 - accuracy: 0.9607
<keras.callbacks.History at 0x7fafa7f72c70>

model.save('mymodel.h5')

l_a = model.evaluate(test_img, test_lab)

313/313 [==============================] - 5s 16ms/step - loss: 0.0480 - accuracy: 0.9838

import keras
noor = keras.models.load_model('mymodel.h5')

x = np.expand_dims(test_img[3], axis=0)
pred = noor.predict(x)
plt.imshow(test_img[3], cmap='gray_r')
print(np.argmax(pred[0]))
1/1 [==============================] - 0s 27ms/step
0

from tensorflow.keras.applications import VGG16

model = VGG16(weights='imagenet')
model.summary()

Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_


553467096/553467096 [==============================] - 3s 0us/step
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 224, 224, 3)] 0

block1_conv1 (Conv2D) (None, 224, 224, 64) 1792

block1_conv2 (Conv2D) (None, 224, 224, 64) 36928

block1_pool (MaxPooling2D) (None, 112, 112, 64) 0

block2_conv1 (Conv2D) (None, 112, 112, 128) 73856

block2_conv2 (Conv2D) (None, 112, 112, 128) 147584

block2_pool (MaxPooling2D) (None, 56, 56, 128) 0

block3_conv1 (Conv2D) (None, 56, 56, 256) 295168

block3_conv2 (Conv2D) (None, 56, 56, 256) 590080

block3_conv3 (Conv2D) (None, 56, 56, 256) 590080

block3_pool (MaxPooling2D) (None, 28, 28, 256) 0

block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160

block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808

block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808

block4_pool (MaxPooling2D) (None, 14, 14, 512) 0

block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808

block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808

block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808

block5_pool (MaxPooling2D) (None, 7, 7, 512) 0

flatten (Flatten) (None, 25088) 0

fc1 (Dense) (None, 4096) 102764544

fc2 (Dense) (None, 4096) 16781312


predictions (Dense) (None, 1000) 4097000

=================================================================
Total params: 138,357,544
Trainable params: 138,357,544
Non-trainable params: 0
_________________________________________________________________
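The parameter counts in the summary follow directly from the layer shapes; a sketch of the arithmetic for two representative rows:

# Conv2D parameters = kernel_h * kernel_w * in_channels * filters + filters
# block1_conv1: 3 * 3 * 3 * 64 + 64 = 1,792
# Dense parameters = inputs * units + units
# fc1: 25088 * 4096 + 4096 = 102,764,544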

from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np

img = image.load_img('images.jpg', color_mode='rgb', target_size=(224, 224))

display(img)

import cv2

image = cv2.imread('./images.jpg')  # note: rebinds `image`, shadowing the keras image module imported above

from google.colab.patches import cv2_imshow

cv2_imshow(image)

image = cv2.resize(image, (224, 224))

cv2_imshow(image)
print(image.shape)
print(image)

x = np.expand_dims(image, axis=0)

print(x.shape)
print(x)
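Note that x still holds raw cv2 pixels (BGR order, 0-255), while preprocess_input was imported above but never applied; VGG16 was trained on inputs normalized that way. A minimal sketch of the usual step, under that assumption:

rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)                    # cv2 loads BGR; VGG16 expects RGB input
x = preprocess_input(np.expand_dims(rgb.astype('float32'), 0))  # mean subtraction / channel reordering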

pred = model.predict(x)

1/1 [==============================] - 1s 858ms/step

print(pred)

print(np.argmax(pred[0]))

235

p = decode_predictions(pred)
print(p)

Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_clas


35363/35363 [==============================] - 0s 0us/step
[[('n02106662', 'German_shepherd', 0.9404931), ('n02105162', 'malinois', 0.058496177), ('n02088

print(p[0])

[('n02106662', 'German_shepherd', 0.9404931), ('n02105162', 'malinois', 0.058496177), ('n020884

print(np.argmax(pred))

235

Experiment 8: Aim: One-hot encoding of text at the word level

import numpy as np

samples = {'Jupiter has 79 known moons .', 'Neptune has 14 confirmed moons ! '}
token_index = {}
counter = 0
for sample in samples:
    for considered_word in sample.split():
        if considered_word not in token_index:
            token_index.update({considered_word: counter + 1})
            counter = counter + 1
token_index

{'Jupiter': 1,
 'has': 2,
 '79': 3,
 'known': 4,
 'moons': 5,
 '.': 6,
 'Neptune': 7,
 '14': 8,
 'confirmed': 9,
 '!': 10}

max_length = 6
results = np.zeros(shape=(len(samples), max_length, max(token_index.values()) + 1))
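The tensor shape follows from the values above: 2 samples, 6 word positions, and 11 columns (token indices 1-10 plus the unused index 0):

print(results.shape)  # (2, 6, 11)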

print(results)

[[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]

[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]]

for i, sample in enumerate(samples):
    for j, considered_word in list(enumerate(sample.split())):
        index = token_index.get(considered_word)
        print(index, token_index[considered_word], considered_word)
        results[i, j, index] = 1

1 1 Jupiter
2 2 has
3 3 79
4 4 known
5 5 moons
6 6 .
7 7 Neptune
2 2 has
8 8 14
9 9 confirmed
5 5 moons
10 10 !

print(results)

[[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]]

[[0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]]
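Keras ships a utility that performs the same word-level indexing; a brief sketch on the same samples (note Tokenizer lowercases and strips punctuation by default, so its vocabulary will differ slightly from the manual one):

from keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer(num_words=100)
tokenizer.fit_on_texts(samples)                    # builds tokenizer.word_index
sequences = tokenizer.texts_to_sequences(samples)  # lists of token indices
one_hot = tokenizer.texts_to_matrix(samples, mode='binary')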

Experiment 2: Aim: Binary classification of IMDB movie reviews with a dense network

import numpy as np
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.datasets import imdb

(train_data, train_lab), (test_data, test_lab) = imdb.load_data(num_words=10000)

print(train_data, train_lab, sep='\n\n')

[list([1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25
list([1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634,
list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112,
...
list([1, 11, 6, 230, 245, 6401, 9, 6, 1225, 446, 2, 45, 2174, 84, 8322, 4007, 21, 4, 912, 84,
list([1, 1446, 7079, 69, 72, 3305, 13, 610, 930, 8, 12, 582, 23, 5, 16, 484, 685, 54, 349, 11,
list([1, 17, 6, 194, 337, 7, 4, 204, 22, 45, 254, 8, 106, 14, 123, 4, 2, 270, 2, 5, 2, 2, 732,

[1 0 0 ... 0 1 0]

print(train_data[0], train_lab[0], test_data[0], test_lab[0], sep='\n\n')

[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100,

[1, 591, 202, 14, 31, 6, 717, 10, 10, 2, 2, 5, 4, 360, 7, 4, 177, 5760, 394, 354, 4, 123, 9, 10

print(len(train_data[0]), train_lab[0], len(test_data[0]), test_lab[0], sep='\n\n')
# print(len(train_data[0]))
print(len(train_data), len(test_data))

218

1

68

0
25000 25000

data = np.concatenate((train_data, test_data))
lab = np.concatenate((train_lab, test_lab), axis=0)

print(data)

[list([1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25
list([1, 194, 1153, 194, 8255, 78, 228, 5, 6, 1463, 4369, 5012, 134, 26, 4, 715, 8, 118, 1634,
list([1, 14, 47, 8, 30, 31, 7, 4, 249, 108, 7, 4, 5974, 54, 61, 369, 13, 71, 149, 14, 22, 112,
...
list([1, 13, 1408, 15, 8, 135, 14, 9, 35, 32, 46, 394, 20, 62, 30, 5093, 21, 45, 184, 78, 4, 1
list([1, 11, 119, 241, 9, 4, 840, 20, 12, 468, 15, 94, 3684, 562, 791, 39, 4, 86, 107, 8, 97,
list([1, 6, 52, 7465, 430, 22, 9, 220, 2594, 8, 28, 2, 519, 3227, 6, 769, 15, 47, 6, 3482, 406

len(data[0])

218

len(data)

50000

def vectorize(seq, dims=10000):
    results = np.zeros((len(seq), dims))
    for i, seqen in enumerate(seq):
        # print(seqen)
        results[i, seqen] = 1
    # print(results)
    return results
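A quick hand check of what vectorize produces, on a hypothetical two-sequence input:

demo = vectorize([[0, 3], [2]], dims=5)  # multi-hot: mark every index that occurs
print(demo)
# [[1. 0. 0. 1. 0.]
#  [0. 0. 1. 0. 0.]]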

d = vectorize(data)
# print(d[0])

print(len(d[0]))

10000

labs = np.array(lab).astype("float32")

test_x = d[:10000]
test_y = labs[:10000]
train_x = d[10000:]
train_y = labs[10000:]

model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(10000,)))
model.add(Dropout(0.3, noise_shape=None, seed=None))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2, noise_shape=None, seed=None))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
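The single sigmoid unit outputs P(positive review), and binary_crossentropy scores it against the 0/1 label; a small numpy sketch of the loss on hypothetical predictions:

# binary cross-entropy: -(y*log(p) + (1-y)*log(1-p)), averaged over samples
y_true = np.array([1.0, 0.0])
y_pred = np.array([0.9, 0.2])
print(-np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)))  # ~0.164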

history = model.fit(train_x, train_y, epochs=3, batch_size=500, validation_data=(test_x, test_y))

Epoch 1/3
80/80 [==============================] - 3s 33ms/step - loss: 0.0530 - accuracy: 0.9818 - val_l
Epoch 2/3
80/80 [==============================] - 3s 34ms/step - loss: 0.0383 - accuracy: 0.9872 - val_l
Epoch 3/3
80/80 [==============================] - 3s 35ms/step - loss: 0.0327 - accuracy: 0.9892 - val_l

history_dict = history.history
history_dict

{'loss': [0.0529993511736393, 0.0383281335234642, 0.03268621116876602],
 'accuracy': [0.9818249940872192, 0.9872499704360962, 0.9892249703407288],
 'val_loss': [0.4509594142436981, 0.5197793245315552, 0.5610363483428955],
 'val_accuracy': [0.8852999806404114, 0.8819000124931335, 0.8794999718666077]}

loss_val = history_dict['loss']
loss_valid = history_dict['val_loss']
epochs = range(1, len(loss_val) + 1)
plt.plot(epochs, loss_val, 'bo', label='Training loss')
plt.plot(epochs, loss_valid, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()

acc_val = history_dict['accuracy']
acc_valid = history_dict['val_accuracy']
epochs = range(1, len(acc_val) + 1)
plt.plot(epochs, acc_val, 'bo', label='Training accuracy')
plt.plot(epochs, acc_valid, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()

Experiment 3: Aim: Multiclass classification of Reuters newswires with a dense network

import numpy as np
import tensorflow as tf
from keras.datasets import reuters

(train_data, train_lab), (test_data, test_lab) = reuters.load_data(num_words=10000)

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters.npz


2110848/2110848 [==============================] - 0s 0us/step

print(train_data[0])

[1, 2, 2, 8, 43, 10, 447, 5, 25, 207, 270, 5, 3095, 111, 16, 369, 186, 90, 67, 7, 89, 5, 19, 10

word_index = reuters.get_word_index()

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/reuters_word_


550378/550378 [==============================] - 0s 0us/step

print(word_index)

{'mdbl': 10996, 'fawc': 16260, 'degussa': 12089, 'woods': 8803, 'hanging': 13796, 'localized':

reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

print(reverse_word_index)

{10996: 'mdbl', 16260: 'fawc', 12089: 'degussa', 8803: 'woods', 13796: 'hanging', 20672: 'local

decoded_newswire = ' '.join([reverse_word_index.get(i, '?') for i in train_data[0]])

decoded_newswire

print(reverse_word_index[5])

said

# indices are offset by 3 because 0, 1 and 2 are reserved for padding,
# start-of-sequence and unknown, so the decode above is misaligned;
# subtracting 3 recovers the actual words
decoded_newswire = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])

decoded_newswire

'? ? ? said as a result of its december acquisition of space co it expects earnings per share in 1987 of 1 15 to 1 30 dlrs per share up from 70 cts in 1986 the company said pretax net should rise to nine to 10 mln dlrs from six mln dlrs in 1986 and rental operation revenues to 19 to 22 mln dlrs from 12 5 mln dlrs it said cash flow per share this year should be 2 50 to three dlrs reuter 3'

def vectorize(seq, dims=10000):
    results = np.zeros((len(seq), dims))
    for i, seqen in enumerate(seq):
        # print(seqen)
        results[i, seqen] = 1
    # print(results)
    return results

def one_hot(labels, dimension=46):
    res = np.zeros((len(labels), dimension))
    for i, label in enumerate(labels):
        res[i, label] = 1
    return res
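The same encoding is available as a Keras utility; a one-line sketch, assuming the 46 Reuters topic classes as above:

from tensorflow.keras.utils import to_categorical

y_train_alt = to_categorical(train_lab, num_classes=46)  # equivalent to one_hot(train_lab)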

x_train = vectorize(train_data)
x_test = vectorize(test_data)

y_train = one_hot(train_lab)
y_test = one_hot(test_lab)

model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(10000,)))
model.add(Dropout(0.3, noise_shape=None, seed=None))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2, noise_shape=None, seed=None))
model.add(Dense(50, activation='relu'))
model.add(Dense(46, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

x_val = x_train[:1000]
partial_xtrain = x_train[1000:]
partial_ytrain = y_train[1000:]
y_val = y_train[:1000]

history = model.fit(partial_xtrain, partial_ytrain, epochs=3, batch_size=500, validation_data=(x_val, y_val))

Epoch 1/3
16/16 [==============================] - 2s 65ms/step - loss: 3.3115 - accuracy: 0.3224 - val_l
Epoch 2/3
16/16 [==============================] - 1s 47ms/step - loss: 2.2373 - accuracy: 0.4901 - val_l
Epoch 3/3
16/16 [==============================] - 1s 47ms/step - loss: 1.6899 - accuracy: 0.5992 - val_l

history_dict = history.history
history_dict

{'loss': [3.3114984035491943, 2.237269401550293, 1.6899131536483765],
 'accuracy': [0.32235029339790344, 0.49010273814201355, 0.599223256111145],
 'val_loss': [2.632394552230835, 1.7524572610855103, 1.4672969579696655],
 'val_accuracy': [0.5289999842643738, 0.5960000157356262, 0.6710000038146973]}

import matplotlib.pyplot as plt

loss_val = history_dict['loss']
loss_valid = history_dict['val_loss']
epochs = range(1, len(loss_val) + 1)
plt.plot(epochs, loss_val, 'bo', label='Training loss')
plt.plot(epochs, loss_valid, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()

acc_val = history_dict['accuracy']
acc_valid = history_dict['val_accuracy']
epochs = range(1, len(acc_val) + 1)
plt.plot(epochs, acc_val, 'bo', label='Training accuracy')
plt.plot(epochs, acc_valid, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
