DL 5 Executed

The document outlines the implementation of several deep learning models in TensorFlow: a Convolutional Neural Network (CNN) for the MNIST dataset, a pre-trained VGG16 model for CIFAR-10 classification, and a one-hot encoding approach for character-level text classification. It covers data preprocessing, model training, evaluation, and visualization of results, and demonstrates the use of accuracy metrics and plotting functions to analyze model performance.

Aim: Build and train a Convolutional Neural Network (CNN) for handwritten-digit classification on the MNIST dataset.
Source code:

# Import necessary libraries

import numpy as np

import tensorflow as tf

from tensorflow.keras.datasets import mnist

from tensorflow.keras.models import Sequential

from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

import matplotlib.pyplot as plt

# Load MNIST dataset

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Reshape and preprocess the data

train_images = train_images.reshape((train_images.shape[0], 28, 28, 1)).astype('float32') / 255

test_images = test_images.reshape((test_images.shape[0], 28, 28, 1)).astype('float32') / 255

# Convert labels to categorical

train_labels = tf.keras.utils.to_categorical(train_labels)

test_labels = tf.keras.utils.to_categorical(test_labels)
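For reference, a one-line illustration (not part of the original listing) of what to_categorical produces:

# Example: the integer label 3 becomes a one-hot vector of length 10
print(tf.keras.utils.to_categorical([3], num_classes=10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]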

# Define the CNN model

model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile the model

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Display training images

plt.figure(figsize=(10, 10))

for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.imshow(train_images[i].reshape(28, 28), cmap='gray')
    plt.title(f"Label: {np.argmax(train_labels[i])}")
    plt.axis('off')
plt.show()

# Print the model summary

model.summary()

# Train the model

history = model.fit(train_images, train_labels, epochs=5, batch_size=64, validation_split=0.2)


# Evaluate the model on test data

test_loss, test_accuracy = model.evaluate(test_images, test_labels)

print("\nTest Accuracy:", test_accuracy)

# Print the datasets information

print("\nTraining Dataset:")

print("Number of images:", train_images.shape[0])

print("Image shape:", train_images.shape[1:])

print("Labels shape:", train_labels.shape)

print("\nTesting Dataset:")

print("Number of images:", test_images.shape[0])

print("Image shape:", test_images.shape[1:])

print("Labels shape:", test_labels.shape)

# Function to plot accuracy and loss over epochs

def plot_history(history):
    plt.plot(history.history['accuracy'], label='Training Accuracy')
    plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
    plt.title('Training and Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.show()

    plt.plot(history.history['loss'], label='Training Loss')
    plt.plot(history.history['val_loss'], label='Validation Loss')
    plt.title('Training and Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
# Plot training history

plot_history(history)
Aim: Load the CIFAR-10 dataset, filter out the cat and dog classes, and display the images with their labels.
Source code:

import numpy as np

import tensorflow as tf

from tensorflow.keras.datasets import cifar10

import matplotlib.pyplot as plt

# Load CIFAR-10 dataset

(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()

# Filter out only the images of cats (label 3) and dogs (label 5)

train_indices = np.where((train_labels == 3) | (train_labels == 5))[0]

test_indices = np.where((test_labels == 3) | (test_labels == 5))[0]

train_images = train_images[train_indices]

train_labels = train_labels[train_indices]

test_images = test_images[test_indices]

test_labels = test_labels[test_indices]
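A quick sanity check (an optional addition) that only the two intended classes remain after filtering:

# Only labels 3 (cat) and 5 (dog) should remain
print(np.unique(train_labels), np.unique(test_labels))  # expected: [3 5] [3 5]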

# Define class names for better readability

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

# Function to display images with labels

def plot_images(images, labels):
    plt.figure(figsize=(10, 10))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(images[i])
        plt.xlabel(class_names[int(labels[i])])
    plt.show()

# Display test images with labels

plot_images(test_images, test_labels)

# Note: the expressions below compute the fraction of dog images (label 5)
# in each split, not a trained model's detection accuracy

train_dog_fraction = np.mean(train_labels.flatten() == 5)

test_dog_fraction = np.mean(test_labels.flatten() == 5)

print(f"Fraction of dogs in Training Images: {train_dog_fraction:.2f}")

print(f"Fraction of dogs in Test Images: {test_dog_fraction:.2f}")


1. Aim: Use a pre-trained convolutional neural network (VGG16) for image classification.
Source code:
# Import necessary libraries
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
import matplotlib.pyplot as plt
# Load CIFAR-10 dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()

# Normalize pixel values
train_images = train_images.astype('float32') / 255.0
test_images = test_images.astype('float32') / 255.0

# Load pre-trained VGG16 model without top (fully connected) layers
vgg_base = VGG16(weights='imagenet', include_top=False, input_shape=(32, 32, 3))

# Freeze the convolutional layers
vgg_base.trainable = False

# Create a new model with the pre-trained VGG16 base and additional layers
model = Sequential([
vgg_base,
Flatten(),
Dense(256, activation='relu'),
Dense(10, activation='softmax')
])

# Compile the model
model.compile(optimizer=RMSprop(learning_rate=2e-5),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
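A common follow-up once the frozen run converges is fine-tuning: unfreeze only the last convolutional block and recompile with a lower learning rate. A minimal sketch, assuming the standard Keras VGG16 layer naming (block5_*); this step is not part of the original run:

# Fine-tuning sketch (assumption, not executed in the original):
# unfreeze only block5 of the VGG16 base
vgg_base.trainable = True
for layer in vgg_base.layers:
    layer.trainable = layer.name.startswith('block5')
# Recompile with a lower learning rate so the pre-trained weights change slowly
model.compile(optimizer=RMSprop(learning_rate=1e-5),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])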

# Display training images
# Note: get_class_names() is defined near the end of this listing (a later notebook cell)
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    plt.xlabel(get_class_names()[train_labels[i][0]])
plt.show()

# Display test images
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(test_images[i])
    plt.xlabel(get_class_names()[test_labels[i][0]])
plt.show()

# Train the model
history = model.fit(train_images, train_labels, epochs=5, batch_size=64,
                    validation_data=(test_images, test_labels))
Epoch 1/5
782/782 ━━━━━━━━━━━━━━━━━━━━ 16s 15ms/step - accuracy: 0.1848 - loss: 2.2483 - val_accuracy: 0.3618 - val_loss: 1.9465
Epoch 2/5
782/782 ━━━━━━━━━━━━━━━━━━━━ 8s 11ms/step - accuracy: 0.3839 - loss: 1.8907 - val_accuracy: 0.4210 - val_loss: 1.7493
Epoch 3/5
782/782 ━━━━━━━━━━━━━━━━━━━━ 9s 12ms/step - accuracy: 0.4352 - loss: 1.7138 - val_accuracy: 0.4516 - val_loss: 1.6360
Epoch 4/5
782/782 ━━━━━━━━━━━━━━━━━━━━ 9s 10ms/step - accuracy: 0.4634 - loss: 1.6136 - val_accuracy: 0.4702 - val_loss: 1.5637
Epoch 5/5
782/782 ━━━━━━━━━━━━━━━━━━━━ 10s 11ms/step - accuracy: 0.4865 - loss: 1.5350 - val_accuracy: 0.4882 - val_loss: 1.5121

# Plot accuracy graph
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Function to get class names
def get_class_names():
    return {
        0: 'airplane',
        1: 'automobile',
        2: 'bird',
        3: 'cat',
        4: 'deer',
        5: 'dog',
        6: 'frog',
        7: 'horse',
        8: 'ship',
        9: 'truck'
    }

1. Aim: Implement One-Hot Encoding of Words or Characters
Source code:
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split

# Sample data
texts = ['hello', 'world', 'deep', 'learning', 'is', 'awesome'] # Input texts
labels = [0, 1, 1, 0, 1, 0]  # Corresponding labels (dummy binary labels for classification)

# One-hot encoding function (char_indices and max_len are defined below,
# before this function is first called)
def one_hot_encode(texts, vocab_size):
    one_hot_encoded = np.zeros((len(texts), max_len, vocab_size))
    for i, text in enumerate(texts):
        for j, char in enumerate(text):
            index = char_indices[char]  # Convert char to its index
            one_hot_encoded[i, j, index] = 1
    return one_hot_encoded

# Print training and testing data (padded, all-zero positions decode to vocab[0])
def print_data(X_train, X_test, y_train, y_test):
    print("Training Data:")
    for text, label in zip(X_train, y_train):
        decoded_text = ''.join([vocab[np.argmax(char)] for char in text])
        print("Text:", decoded_text, "| Label:", label)

    print("\nTesting Data:")
    for text, label in zip(X_test, y_test):
        decoded_text = ''.join([vocab[np.argmax(char)] for char in text])
        print("Text:", decoded_text, "| Label:", label)

# Creating vocabulary
vocab = sorted(set(''.join(texts)))  # Extract unique characters and sort
vocab_size = len(vocab)
char_indices = {char: i for i, char in enumerate(vocab)}  # Map each character to an index
max_len = max(map(len, texts))  # Find the length of the longest text

# One-hot encode texts
one_hot_texts = one_hot_encode(texts, vocab_size)
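As a quick sanity check, the encoded array should have shape (number of texts, max_len, vocab_size); for the six sample words above that is (6, 8, 14), since the longest word ('learning') has 8 characters and there are 14 unique characters:

# Inspect the encoding produced above
print(one_hot_texts.shape)  # (6, 8, 14)
print(vocab)                # the 14 sorted unique characters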

# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(one_hot_texts, labels,
                                                    test_size=0.2, random_state=42)
# Print training and testing data
print_data(X_train, X_test, y_train, y_test)

Training Data:
Text: awesomea | Label: 0
Text: deepaaaa | Label: 1
Text: isaaaaaa | Label: 1
Text: learning | Label: 0

Testing Data:
Text: helloaaa | Label: 0
Text: worldaaa | Label: 1

(The trailing 'a' runs appear because padded, all-zero positions decode to vocab[0], which is 'a' for this vocabulary.)

# Convert data to TensorFlow tensors
X_train_tf = tf.convert_to_tensor(X_train, dtype=tf.float32)
y_train_tf = tf.convert_to_tensor(y_train, dtype=tf.float32)
X_test_tf = tf.convert_to_tensor(X_test, dtype=tf.float32)
y_test_tf = tf.convert_to_tensor(y_test, dtype=tf.float32)

# Build the model
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(max_len, vocab_size)),  # Flatten the input
    tf.keras.layers.Dense(64, activation='relu'),                # Hidden layer
    tf.keras.layers.Dense(1, activation='sigmoid')               # Output layer for binary classification
])

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(X_train_tf, y_train_tf, epochs=10, batch_size=1, verbose=1)

Epoch 1/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step - accuracy: 0.4333 - loss: 0.7054
Epoch 2/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.4333 - loss: 0.6585
Epoch 3/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7333 - loss: 0.6138
Epoch 4/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.6415
Epoch 5/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.6105
Epoch 6/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.5872
Epoch 7/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.5679
Epoch 8/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.5204
Epoch 9/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.5298
Epoch 10/10
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 1.0000 - loss: 0.5212
<keras.src.callbacks.history.History at 0x78b838bbc6d0>

# Evaluate the model
test_loss, test_accuracy = model.evaluate(X_test_tf, y_test_tf, verbose=0)
print("\nTest Accuracy:", test_accuracy)

Test Accuracy: 0.0

With only two held-out samples, this test accuracy is essentially noise: the model fits the four training words perfectly but has nothing to generalize from.
