Statement 1:
Train a Deep Neural Network on the MNIST dataset using the Adam optimizer with a learning
rate of 0.001, and generate a classification report and ROC AUC plot.
# Import necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, roc_auc_score, roc_curve
from sklearn.preprocessing import label_binarize
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize the images to the range [0, 1]
x_train = x_train / 255.0
x_test = x_test / 255.0
# Convert labels to one-hot encoding
y_train_cat = to_categorical(y_train, num_classes=10)
y_test_cat = to_categorical(y_test, num_classes=10)
# Build a simple deep neural network model
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')  # 10 output classes
])
# Compile the model using the Adam optimizer with learning rate 0.001
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train_cat, epochs=5, batch_size=128, validation_split=0.2)
# Evaluate the model
test_loss, test_accuracy = model.evaluate(x_test, y_test_cat, verbose=0)
print(f"Test Accuracy: {test_accuracy:.4f}")
# Predict class probabilities and classes
y_pred_probs = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_probs, axis=1)
# Classification report
print("\nClassification Report:")
print(classification_report(y_test, y_pred_classes))
# ROC AUC score (multiclass, one-vs-rest)
y_test_bin = label_binarize(y_test, classes=np.arange(10))
roc_auc = roc_auc_score(y_test_bin, y_pred_probs, average='macro', multi_class='ovr')
print(f"\nMulticlass ROC AUC Score: {roc_auc:.4f}")
# Plot ROC curves for all classes
plt.figure(figsize=(12, 8))
for i in range(10):
    fpr, tpr, _ = roc_curve(y_test_bin[:, i], y_pred_probs[:, i])
    plt.plot(fpr, tpr, label=f'Class {i} (AUC = {roc_auc_score(y_test_bin[:, i], y_pred_probs[:, i]):.2f})')
plt.plot([0, 1], [0, 1], 'k--')  # Diagonal reference line
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves for MNIST Classification')
plt.legend(loc='lower right')
plt.grid(True)
plt.show()
Statement 2
Train a DNN using the SGD optimizer with a learning rate of 0.0001 on the MNIST dataset
and analyze the model's performance.
# Import necessary libraries
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize the input data to the range [0, 1]
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# Convert labels to one-hot encoded vectors
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# Define the Deep Neural Network model
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')  # Output layer for 10 classes
])
# Compile the model with the SGD optimizer and a low learning rate
optimizer = SGD(learning_rate=0.0001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train,
                    epochs=20,
                    batch_size=128,
                    validation_split=0.1,
                    verbose=2)
# Evaluate the model on test data
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_acc * 100:.2f}%")
print(f"Test Loss: {test_loss:.4f}")
# Plot training and validation accuracy and loss
plt.figure(figsize=(14, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.title('Accuracy over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
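For a fuller analysis of the model's performance, a per-class report can be generated from the test predictions. This is a minimal optional sketch, assuming scikit-learn is installed; it reuses model, x_test, and the one-hot y_test defined above.
from sklearn.metrics import classification_report
# Convert predicted probabilities and one-hot labels back to class indices
y_pred_classes = np.argmax(model.predict(x_test), axis=1)
y_true_classes = np.argmax(y_test, axis=1)
print(classification_report(y_true_classes, y_pred_classes))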
Statement 3
Train a Deep Neural Network on the MNIST dataset using RMSprop optimizer with a learning
rate of 0.0001, and compare results using an accuracy table and ROC curve.
# Import required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, roc_curve, auc
from sklearn.preprocessing import label_binarize
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical
# Load MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel values
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
# One-hot encode the labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Build the DNN model
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(10, activation='softmax')
])
# Compile the model with the RMSprop optimizer
optimizer = RMSprop(learning_rate=0.0001)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train_cat,
                    epochs=15,
                    batch_size=128,
                    validation_split=0.1,
                    verbose=2)
# Evaluate the model on the test set
test_loss, test_acc = model.evaluate(x_test, y_test_cat, verbose=0)
print(f"\nTest Accuracy: {test_acc * 100:.2f}%")
print(f"Test Loss: {test_loss:.4f}")
# Predict class probabilities
y_pred_prob = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_prob, axis=1)
# ------------------ Accuracy Table (Classification Report) ------------------
report = classification_report(y_test, y_pred_classes, output_dict=True)
report_df = pd.DataFrame(report).transpose()
plt.figure(figsize=(10, 6))
sns.heatmap(report_df.iloc[:-1, :-1], annot=True, fmt=".2f", cmap="Blues")
plt.title("Classification Report (Accuracy Table)")
plt.show()
# ------------------ ROC Curve ------------------
# Binarize labels for multi-class ROC
y_test_bin = label_binarize(y_test, classes=range(10))
fpr = {}
tpr = {}
roc_auc = {}
# Calculate ROC for each class
for i in range(10):
    fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_prob[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Plot all ROC curves
plt.figure(figsize=(10, 8))
for i in range(10):
    plt.plot(fpr[i], tpr[i], label=f'Class {i} (AUC = {roc_auc[i]:.2f})')
plt.plot([0, 1], [0, 1], 'k--', label='Random Guess')
plt.title('Multi-Class ROC Curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.grid(True)
plt.show()
Statement 4
Use SGD optimizer with a learning rate of 0.01 to train a DNN on the Wildfire dataset, then
evaluate precision, recall, and F1-score with supporting bar plots.
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
# Load the Wildfire dataset (placeholder filename; adjust the path to your copy)
df = pd.read_csv('wildfire.csv')
# Display first few rows
print("Dataset Preview:")
print(df.head())
# Assume the last column is the target (binary: 0 = No fire, 1 = Fire)
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Scale the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# One-hot encode the binary target
y_cat = to_categorical(y)
# Split data into training and testing sets
x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)
# Build the DNN model
model = Sequential([
    Dense(64, activation='relu', input_shape=(x_train.shape[1],)),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')  # Binary classification with 2 output units
])
# Compile the model with the SGD optimizer
optimizer = SGD(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(x_train, y_train, epochs=20, batch_size=32, validation_split=0.1, verbose=2)
# Evaluate on test data
loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
print(f"\nTest Accuracy: {accuracy*100:.2f}%")
print(f"Test Loss: {loss:.4f}")
# Predict labels for test data
y_pred_probs = model.predict(x_test)
y_pred_classes = np.argmax(y_pred_probs, axis=1)
y_true = np.argmax(y_test, axis=1)
# Generate classification report
report = classification_report(y_true, y_pred_classes, output_dict=True)
report_df = pd.DataFrame(report).transpose()
# Keep precision, recall, and F1-score for classes 0 and 1 only
metrics_df = report_df[['precision', 'recall', 'f1-score']].iloc[:2]
# Plot the metrics as a bar chart
metrics_df.plot(kind='bar', figsize=(8, 6), colormap='viridis')
plt.title('Precision, Recall & F1-Score for Wildfire Detection')
plt.xlabel('Class (0 = No Fire, 1 = Fire)')
plt.ylabel('Score')
plt.ylim(0, 1)
plt.grid(True)
plt.xticks(rotation=0)
plt.legend(loc='lower right')
plt.tight_layout()
plt.show()
Statement 5
Train a DNN on the Forest Fire dataset using RMSprop optimizer with a learning rate of 0.01.
Report training and validation accuracy.
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical
# Load the Forest Fire dataset (placeholder filename; adjust the path if necessary)
df = pd.read_csv('forestfire.csv')
# Show dataset structure
print("Dataset Preview:")
print(df.head())
# Assuming the last column is the binary class (0: no fire, 1: fire)
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Feature scaling
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# One-hot encode target for binary classification
y_cat = to_categorical(y)
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)
# Build the DNN model
model = Sequential([
    Dense(64, activation='relu', input_shape=(x_train.shape[1],)),
    Dense(32, activation='relu'),
    Dense(2, activation='softmax')  # 2 neurons for binary classification
])
# Compile with the RMSprop optimizer and a learning rate of 0.01
optimizer = RMSprop(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model and store the history
history = model.fit(x_train, y_train,
                    epochs=20,
                    batch_size=32,
                    validation_split=0.2,
                    verbose=2)
# Plot training & validation accuracy
plt.figure(figsize=(8, 6))
plt.plot(history.history['accuracy'], label='Training Accuracy', marker='o')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy', marker='s')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
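To report the accuracies numerically as well as graphically, the final-epoch values can be printed from the history object already created above; a small optional addition:
print(f"Final training accuracy:   {history.history['accuracy'][-1]:.4f}")
print(f"Final validation accuracy: {history.history['val_accuracy'][-1]:.4f}")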
Statement 6
Compare DNN training using Adam and SGD optimizers (both with a learning rate of 0.001) on
the Wildfire dataset
# Import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.utils import to_categorical
# Load the Wildfire dataset (placeholder filename; adjust the path to your copy)
df = pd.read_csv('wildfire.csv')
# Display first few rows
print("Dataset Sample:")
print(df.head())
# Feature and label split
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Normalize features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# One-hot encode target
y_cat = to_categorical(y)
# Train-test split
x_train, x_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42)
# Define a function to build the model
def build_model(optimizer):
    model = Sequential([
        Dense(64, activation='relu', input_shape=(x_train.shape[1],)),
        Dense(32, activation='relu'),
        Dense(2, activation='softmax')  # Binary classification
    ])
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# Create and train a model with the SGD optimizer
sgd_model = build_model(SGD(learning_rate=0.001))
history_sgd = sgd_model.fit(x_train, y_train, epochs=20, batch_size=32,
                            validation_split=0.2, verbose=0)
# Create and train a model with the Adam optimizer
adam_model = build_model(Adam(learning_rate=0.001))
history_adam = adam_model.fit(x_train, y_train, epochs=20, batch_size=32,
                              validation_split=0.2, verbose=0)
# Plot training and validation accuracy for both optimizers
plt.figure(figsize=(10, 6))
plt.plot(history_sgd.history['val_accuracy'], label='SGD - Validation Accuracy', linestyle='--', marker='o')
plt.plot(history_adam.history['val_accuracy'], label='Adam - Validation Accuracy', linestyle='-', marker='s')
plt.plot(history_sgd.history['accuracy'], label='SGD - Training Accuracy', linestyle='--', alpha=0.7)
plt.plot(history_adam.history['accuracy'], label='Adam - Training Accuracy', linestyle='-', alpha=0.7)
plt.title('Comparison of DNN Training: SGD vs Adam (lr = 0.001)')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.grid(True)
plt.legend()
plt.tight_layout()
plt.show()
Statement 7
Image Classification on MNIST Using DNN with Learning Rate Variation
● Use the MNIST dataset and build a DNN
● Train the same model using learning rates: 0.01, 0.001
● Use SGD optimizer and track accuracy for each run
● Plot loss and accuracy for comparison
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
# Load MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Normalize pixel values
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# One-hot encode labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Define a function to build the model
def build_model():
    model = Sequential([
        Flatten(input_shape=(28, 28)),
        Dense(128, activation='relu'),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax')
    ])
    return model
# Learning rates to compare
learning_rates = [0.01, 0.001]
# Dictionary to store training histories
histories = {}
for lr in learning_rates:
    print(f"\nTraining model with learning rate = {lr}")
    model = build_model()
    optimizer = SGD(learning_rate=lr)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(x_train, y_train_cat, epochs=15, batch_size=128, validation_split=0.1, verbose=2)
    histories[lr] = history
# Plot comparison of loss and accuracy
plt.figure(figsize=(14, 6))
# Plot loss
plt.subplot(1, 2, 1)
for lr, history in histories.items():
    plt.plot(history.history['loss'], label=f'Train Loss (lr={lr})')
    plt.plot(history.history['val_loss'], linestyle='--', label=f'Val Loss (lr={lr})')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
# Plot accuracy
plt.subplot(1, 2, 2)
for lr, history in histories.items():
    plt.plot(history.history['accuracy'], label=f'Train Acc (lr={lr})')
    plt.plot(history.history['val_accuracy'], linestyle='--', label=f'Val Acc (lr={lr})')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
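To make the tracked accuracies easy to compare at a glance, the final validation accuracy for each learning rate can also be printed from the stored histories; a small optional addition:
for lr, history in histories.items():
    print(f"lr={lr}: final validation accuracy = {history.history['val_accuracy'][-1]:.4f}")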
Statement 8
Evaluating DNN on CIFAR-10 Using Batch Size Variation
● Load CIFAR-10 dataset
● Use a feed-forward network with BatchNormalization
● Train with batch sizes 32 and 64, keeping other parameters constant
● Use Adam optimizer and train for 10 epochs
● Compare accuracy and plot graphs
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Load CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize pixel values
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# One-hot encode labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Define a model builder with BatchNormalization
def build_model():
    model = Sequential([
        Flatten(input_shape=(32, 32, 3)),
        Dense(512),
        BatchNormalization(),
        Activation('relu'),
        Dense(256),
        BatchNormalization(),
        Activation('relu'),
        Dense(128),
        BatchNormalization(),
        Activation('relu'),
        Dense(10, activation='softmax')
    ])
    return model
# Batch sizes to evaluate
batch_sizes = [32, 64]
histories = {}
for batch_size in batch_sizes:
    print(f"\nTraining with batch size = {batch_size}")
    model = build_model()
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(x_train, y_train_cat,
                        epochs=10,
                        batch_size=batch_size,
                        validation_split=0.1,
                        verbose=2)
    histories[batch_size] = history
# Plot accuracy comparison
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
for bs, history in histories.items():
    plt.plot(history.history['accuracy'], label=f'Train Acc (batch={bs})')
    plt.plot(history.history['val_accuracy'], linestyle='--', label=f'Val Acc (batch={bs})')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
# Plot loss comparison
plt.subplot(1, 2, 2)
for bs, history in histories.items():
    plt.plot(history.history['loss'], label=f'Train Loss (batch={bs})')
    plt.plot(history.history['val_loss'], linestyle='--', label=f'Val Loss (batch={bs})')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
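To summarize the comparison numerically alongside the plots, the final validation accuracy for each batch size can be printed from the stored histories; an optional addition:
for bs, history in histories.items():
    print(f"Batch size {bs}: final validation accuracy = {history.history['val_accuracy'][-1]:.4f}")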
Statement 9
Train a DNN on the UCI dataset using batch size 32 and a learning rate of 0.0001. Evaluate
training time and accuracy
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
# Load the UCI Wine Quality dataset (red wine); adjust the URL/path if your copy differs
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
data = pd.read_csv(url, sep=';')
# Features and target
X = data.drop('quality', axis=1).values
y = data['quality'].values
# Quality values are integers from 3-8, so we treat this as a classification problem.
# First encode the labels to consecutive integers starting from 0
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y_cat, test_size=0.2, random_state=42,
                                                    stratify=y_cat)
# Feature scaling
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Build a simple DNN model
def build_model():
    model = Sequential([
        Dense(64, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(32, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Parameters
batch_size = 32
learning_rate = 0.0001
# Compile the model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model with timing
start_time = time.time()
history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size, validation_split=0.1,
                    verbose=2)
end_time = time.time()
training_time = end_time - start_time
# Evaluate on test data
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTraining time: {training_time:.2f} seconds")
print(f"Test Accuracy: {test_accuracy*100:.2f}%")
Statement 10
Preprocess the Alphabet CSV dataset using label encoding and standard scaling, then train a
simple DNN using batch size 32 and learning rate 0.0001
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
# Load dataset (replace 'alphabet.csv' with your actual filename/path)
# For demonstration, assume the last column is the target (alphabet labels)
data = pd.read_csv('alphabet.csv')
# Separate features and target
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Label encode target (alphabets to integers)
label_encoder_target = LabelEncoder()
y_encoded = label_encoder_target.fit_transform(y)
# Check for categorical features in X and label encode them if any
for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])
# Standard scale the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# One-hot encode target for classification
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42,
                                                    stratify=y_cat)
# Build a simple DNN
def build_model():
    model = Sequential([
        Dense(64, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(32, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Parameters
batch_size = 32
learning_rate = 0.0001
# Compile the model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size, validation_split=0.1,
                    verbose=2)
# Evaluate on the test set
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_accuracy*100:.2f}%")
Statement 11
Use a batch size of 64 and learning rate of 0.001 to train a DNN on the UCI dataset. Document
training accuracy and loss.
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the UCI Alphabet (Letter Recognition) dataset
# Replace 'alphabet.csv' with your actual path if different
data = pd.read_csv('alphabet.csv')
# Assuming the last column is the target (letters)
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Label encode target (letters to integers)
label_encoder_target = LabelEncoder()
y_encoded = label_encoder_target.fit_transform(y)
# Check for categorical features in X and label encode them if any (usually numeric, but just in case)
for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])
# Standard scale the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# One-hot encode target for classification
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_cat, test_size=0.2, random_state=42,
                                                    stratify=y_cat)
# Build the DNN model
def build_model():
    model = Sequential([
        Dense(128, input_shape=(X_train.shape[1],), activation='relu'),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Parameters
batch_size = 64
learning_rate = 0.001
# Compile the model
model = build_model()
optimizer = SGD(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model and record the history
history = model.fit(X_train, y_train, epochs=30, batch_size=batch_size, validation_split=0.1,
                    verbose=2)
# Plot training accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Evaluate on the test set
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_accuracy*100:.2f}%")
Statement 12
Preprocess the Alphabet dataset and train a CNN with the architecture using Adam optimizer,
20 epochs, batch size 64, and learning rate 0.001.
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the Alphabet dataset (placeholder filename; replace with your path)
data = pd.read_csv('alphabet.csv')
# Features and target
X = data.iloc[:, :-1]
y = data.iloc[:, -1]
# Label encode target
le_target = LabelEncoder()
y_encoded = le_target.fit_transform(y)
# Check for categorical features in X and encode them if any (usually numeric)
for col in X.columns:
    if X[col].dtype == 'object':
        le = LabelEncoder()
        X[col] = le.fit_transform(X[col])
# Standard scale the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Number of samples and features
n_samples, n_features = X_scaled.shape
# Reshape features into a 2D grid for CNN input
# For example, reshape to (samples, 4, 4, 1) if there are 16 features
# If the feature count is not a perfect square, pad with zeros
import math
def reshape_for_cnn(X):
    n = X.shape[1]
    sq = int(math.ceil(math.sqrt(n)))  # smallest square side whose area covers n features
    padded_size = sq * sq
    # Pad with zeros if needed
    if padded_size > n:
        padding = np.zeros((X.shape[0], padded_size - n))
        X_padded = np.hstack((X, padding))
    else:
        X_padded = X
    # Reshape to (samples, sq, sq, 1)
    return X_padded.reshape(-1, sq, sq, 1)
X_cnn = reshape_for_cnn(X_scaled)
# One-hot encode the target
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X_cnn, y_cat, test_size=0.2, random_state=42,
                                                    stratify=y_cat)
# Build the CNN model
# padding='same' keeps small feature grids (e.g. 4x4) from shrinking below the kernel size
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]),
    MaxPooling2D((2, 2)),
    Dropout(0.25),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax')
])
# Compile
learning_rate = 0.001
optimizer = Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train
history = model.fit(X_train, y_train, epochs=20, batch_size=64, validation_split=0.1, verbose=2)
# Plot training accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Evaluate on the test set
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print(f"\nTest Accuracy: {test_acc*100:.2f}%")
Statement 13
Compare the performance of a CNN and a DNN on the CIFAR-10 dataset. Highlight differences
in accuracy and training time.
import time
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
# Load CIFAR-10
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Normalize data
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# One-hot encode labels
num_classes = 10
y_train_cat = to_categorical(y_train, num_classes)
y_test_cat = to_categorical(y_test, num_classes)
# 1) Define the DNN model (simple feed-forward)
def build_dnn():
    model = Sequential([
        Flatten(input_shape=X_train.shape[1:]),
        Dense(512, activation='relu'),
        Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# 2) Define the CNN model
def build_cnn():
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=X_train.shape[1:]),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2)),
        Conv2D(128, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Training parameters
batch_size = 64
epochs = 15
learning_rate = 0.001
# Train and time the DNN (each model gets its own optimizer instance)
dnn = build_dnn()
dnn.compile(optimizer=Adam(learning_rate=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
history_dnn = dnn.fit(X_train, y_train_cat, epochs=epochs, batch_size=batch_size,
                      validation_split=0.1, verbose=2)
dnn_time = time.time() - start_time
# Evaluate the DNN
dnn_loss, dnn_acc = dnn.evaluate(X_test, y_test_cat, verbose=0)
# Train and time the CNN
cnn = build_cnn()
cnn.compile(optimizer=Adam(learning_rate=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])
start_time = time.time()
history_cnn = cnn.fit(X_train, y_train_cat, epochs=epochs, batch_size=batch_size,
                      validation_split=0.1, verbose=2)
cnn_time = time.time() - start_time
# Evaluate the CNN
cnn_loss, cnn_acc = cnn.evaluate(X_test, y_test_cat, verbose=0)
# Print results
print(f"DNN Test Accuracy: {dnn_acc*100:.2f}% | Training time: {dnn_time:.2f} seconds")
print(f"CNN Test Accuracy: {cnn_acc*100:.2f}% | Training time: {cnn_time:.2f} seconds")
# Plot accuracy comparison
plt.figure(figsize=(10, 5))
plt.plot(history_dnn.history['val_accuracy'], label='DNN Validation Accuracy')
plt.plot(history_cnn.history['val_accuracy'], label='CNN Validation Accuracy')
plt.title('Validation Accuracy: DNN vs CNN on CIFAR-10')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
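To highlight the accuracy and training-time differences side by side, a simple bar chart can be added after the prints above. This is an optional sketch that only reuses variables already computed in this script.
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.bar(['DNN', 'CNN'], [dnn_acc, cnn_acc], color=['steelblue', 'darkorange'])
plt.title('Test Accuracy')
plt.ylabel('Accuracy')
plt.subplot(1, 2, 2)
plt.bar(['DNN', 'CNN'], [dnn_time, cnn_time], color=['steelblue', 'darkorange'])
plt.title('Training Time')
plt.ylabel('Seconds')
plt.tight_layout()
plt.show()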
Statement 14
Implement a Deep Neural Network (DNN) on the MNIST dataset using the Adam optimizer with
a learning rate of 0.001 and plot training accuracy and loss.
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Load MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Normalize
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
# One-hot encode labels
num_classes = 10
y_train_cat = to_categorical(y_train, num_classes)
y_test_cat = to_categorical(y_test, num_classes)
# Build the DNN model
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(num_classes, activation='softmax')
])
# Compile with the Adam optimizer
optimizer = Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(X_train, y_train_cat, epochs=20, batch_size=64, validation_split=0.1, verbose=2)
# Plot training accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 15
Implement a DNN using RMSprop with learning rates 0.01 and 0.0001 on the Wildfire dataset.
Compare training and validation performance.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical
# Load the Wildfire dataset (placeholder filename; replace with the actual path)
data = pd.read_csv('wildfire.csv')
# Example preprocessing - adjust based on your dataset columns:
# Assume the last column is the target variable
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
# Encode the target if it is categorical
if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)
# For classification, convert the target to one-hot vectors
num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)
# Split data
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)
# Standard scale the features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
# Define a DNN model builder function
def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Training parameters
batch_size = 32
epochs = 30
# Train with RMSprop, lr=0.01
model_high_lr = build_model(X_train.shape[1], num_classes)
model_high_lr.compile(optimizer=RMSprop(learning_rate=0.01),
                      loss='categorical_crossentropy', metrics=['accuracy'])
history_high_lr = model_high_lr.fit(X_train, y_train,
                                    validation_data=(X_val, y_val),
                                    epochs=epochs, batch_size=batch_size, verbose=2)
# Train with RMSprop, lr=0.0001
model_low_lr = build_model(X_train.shape[1], num_classes)
model_low_lr.compile(optimizer=RMSprop(learning_rate=0.0001),
                     loss='categorical_crossentropy', metrics=['accuracy'])
history_low_lr = model_low_lr.fit(X_train, y_train,
                                  validation_data=(X_val, y_val),
                                  epochs=epochs, batch_size=batch_size, verbose=2)
# Plot training and validation accuracy and loss for both learning rates
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
plt.plot(history_high_lr.history['accuracy'], label='Train Acc (lr=0.01)')
plt.plot(history_high_lr.history['val_accuracy'], label='Val Acc (lr=0.01)')
plt.plot(history_low_lr.history['accuracy'], label='Train Acc (lr=0.0001)')
plt.plot(history_low_lr.history['val_accuracy'], label='Val Acc (lr=0.0001)')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history_high_lr.history['loss'], label='Train Loss (lr=0.01)')
plt.plot(history_high_lr.history['val_loss'], label='Val Loss (lr=0.01)')
plt.plot(history_low_lr.history['loss'], label='Train Loss (lr=0.0001)')
plt.plot(history_low_lr.history['val_loss'], label='Val Loss (lr=0.0001)')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 16
Multiclass classification using Deep Neural Networks. Example: use the OCR letter
recognition dataset.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import classification_report
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.utils import to_categorical
# Load the dataset (placeholder filename; adjust the path as needed)
data = pd.read_csv('letter-recognition.csv')
# Inspect the columns; usually the first column is the label and the rest are features
print(data.head())
# Separate features and labels
X = data.iloc[:, 1:].values  # all columns except the first are features
y = data.iloc[:, 0].values   # the first column is the label (letters)
# Encode labels (letters) to integers
le = LabelEncoder()
y_enc = le.fit_transform(y)
# One-hot encode output labels for multiclass classification
num_classes = len(np.unique(y_enc))
y_cat = to_categorical(y_enc, num_classes)
# Train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y_cat, test_size=0.2, random_state=42)
# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Build the DNN model
model = Sequential([
    Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(64, activation='relu'),
    Dense(num_classes, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(X_train, y_train, epochs=30, batch_size=64, validation_split=0.1, verbose=2)
# Evaluate on test data
loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"Test Accuracy: {accuracy*100:.2f}%")
# Predict classes for the test set
y_pred_prob = model.predict(X_test)
y_pred = np.argmax(y_pred_prob, axis=1)
y_true = np.argmax(y_test, axis=1)
# Classification report
print("\nClassification Report:\n")
print(classification_report(y_true, y_pred, target_names=le.classes_))
# Plot accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
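A confusion matrix is a useful complement to the classification report for 26-class letter recognition, since it shows which letters are confused with each other. This is an optional sketch assuming scikit-learn 1.0 or newer; it reuses y_true, y_pred, and le from above.
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true, y_pred, display_labels=le.classes_,
                                        xticks_rotation='vertical', colorbar=False)
plt.title('Confusion Matrix (Letter Recognition)')
plt.show()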
Statement 17
Implement the training of a DNN using Adam and SGD optimizers with a learning rate of
0.001 on the Wildfire dataset. Provide comparative plots.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.utils import to_categorical
# Load the Wildfire dataset (placeholder filename; replace with your actual file path)
data = pd.read_csv('wildfire.csv')
# Example preprocessing - adjust based on your dataset structure
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
# Encode the target if it is categorical
if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)
num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)
# Split data
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)
# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
# DNN model builder function
def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
batch_size = 32
epochs = 30
learning_rate = 0.001
# Train with the Adam optimizer
model_adam = build_model(X_train.shape[1], num_classes)
model_adam.compile(optimizer=Adam(learning_rate=learning_rate),
                   loss='categorical_crossentropy', metrics=['accuracy'])
history_adam = model_adam.fit(X_train, y_train,
                              validation_data=(X_val, y_val),
                              epochs=epochs, batch_size=batch_size, verbose=2)
# Train with the SGD optimizer
model_sgd = build_model(X_train.shape[1], num_classes)
model_sgd.compile(optimizer=SGD(learning_rate=learning_rate),
                  loss='categorical_crossentropy', metrics=['accuracy'])
history_sgd = model_sgd.fit(X_train, y_train,
                            validation_data=(X_val, y_val),
                            epochs=epochs, batch_size=batch_size, verbose=2)
# Plot comparison graphs
plt.figure(figsize=(14, 6))
# Accuracy plot
plt.subplot(1, 2, 1)
plt.plot(history_adam.history['accuracy'], label='Adam Train Acc')
plt.plot(history_adam.history['val_accuracy'], label='Adam Val Acc')
plt.plot(history_sgd.history['accuracy'], label='SGD Train Acc')
plt.plot(history_sgd.history['val_accuracy'], label='SGD Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
# Loss plot
plt.subplot(1, 2, 2)
plt.plot(history_adam.history['loss'], label='Adam Train Loss')
plt.plot(history_adam.history['val_loss'], label='Adam Val Loss')
plt.plot(history_sgd.history['loss'], label='SGD Train Loss')
plt.plot(history_sgd.history['val_loss'], label='SGD Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 18
Implement a DNN using batch sizes 32 and 64 with a fixed learning rate of 0.001 on the UCI
dataset. Compare model loss and performance.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Load the UCI dataset (replace the path accordingly)
data = pd.read_csv('uci_dataset.csv')
# Example preprocessing (adjust based on dataset specifics)
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
# Encode labels if categorical
if y.dtype == object or len(np.unique(y)) < 20:
    le = LabelEncoder()
    y = le.fit_transform(y)
num_classes = len(np.unique(y))
y_cat = to_categorical(y, num_classes)
# Train/validation split
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)
# Standardize features
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
# Model builder function
def build_model(input_dim, num_classes):
    model = Sequential([
        Dense(128, activation='relu', input_dim=input_dim),
        Dense(64, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
learning_rate = 0.001
epochs = 30
# Train with batch size 32
model_32 = build_model(X_train.shape[1], num_classes)
model_32.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy', metrics=['accuracy'])
history_32 = model_32.fit(X_train, y_train,
                          validation_data=(X_val, y_val),
                          epochs=epochs, batch_size=32, verbose=2)
# Train with batch size 64
model_64 = build_model(X_train.shape[1], num_classes)
model_64.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy', metrics=['accuracy'])
history_64 = model_64.fit(X_train, y_train,
                          validation_data=(X_val, y_val),
                          epochs=epochs, batch_size=64, verbose=2)
# Plot loss comparison
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
plt.plot(history_32.history['loss'], label='Batch Size 32 - Train Loss')
plt.plot(history_32.history['val_loss'], label='Batch Size 32 - Val Loss')
plt.plot(history_64.history['loss'], label='Batch Size 64 - Train Loss')
plt.plot(history_64.history['val_loss'], label='Batch Size 64 - Val Loss')
plt.title('Loss Comparison')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
# Plot accuracy comparison
plt.subplot(1, 2, 2)
plt.plot(history_32.history['accuracy'], label='Batch Size 32 - Train Acc')
plt.plot(history_32.history['val_accuracy'], label='Batch Size 32 - Val Acc')
plt.plot(history_64.history['accuracy'], label='Batch Size 64 - Train Acc')
plt.plot(history_64.history['val_accuracy'], label='Batch Size 64 - Val Acc')
plt.title('Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
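To summarize the comparison numerically, the final validation loss and accuracy for each batch size can be printed from the two histories; a small optional addition:
print(f"Batch size 32: final val loss = {history_32.history['val_loss'][-1]:.4f}, "
      f"final val accuracy = {history_32.history['val_accuracy'][-1]:.4f}")
print(f"Batch size 64: final val loss = {history_64.history['val_loss'][-1]:.4f}, "
      f"final val accuracy = {history_64.history['val_accuracy'][-1]:.4f}")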
Statement 19
Preprocess the Alphabet dataset and train both a DNN and a CNN. Use Adam optimizer with a
batch size of 64. Compare accuracy across 20 epochs.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
# Load the Alphabet dataset (placeholder filename; adjust the path as needed)
data = pd.read_csv('alphabet.csv')
# Separate features and target (assuming the last column is the label)
X = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
# Encode labels
le = LabelEncoder()
y_encoded = le.fit_transform(y)
num_classes = len(np.unique(y_encoded))
y_cat = to_categorical(y_encoded, num_classes)
# Train-test split
X_train, X_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2, random_state=42)
# Standardize features for both the DNN and the CNN
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
# For the CNN: reshape flat feature vectors into image format
# The Alphabet dataset is assumed to contain 16x16 images (256 features); reshape accordingly
img_dim = 16  # update if your data differs
X_train_cnn = X_train_scaled.reshape(-1, img_dim, img_dim, 1)
X_val_cnn = X_val_scaled.reshape(-1, img_dim, img_dim, 1)
# Parameters
batch_size = 64
epochs = 20
learning_rate = 0.001
# Build the DNN model
def build_dnn(input_dim, num_classes):
    model = Sequential([
        Dense(256, activation='relu', input_dim=input_dim),
        Dense(128, activation='relu'),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Build the CNN model
def build_cnn(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Train the DNN
dnn = build_dnn(X_train_scaled.shape[1], num_classes)
dnn.compile(optimizer=Adam(learning_rate=learning_rate),
            loss='categorical_crossentropy', metrics=['accuracy'])
history_dnn = dnn.fit(X_train_scaled, y_train,
                      validation_data=(X_val_scaled, y_val),
                      epochs=epochs, batch_size=batch_size, verbose=2)
# Train the CNN
cnn = build_cnn(X_train_cnn.shape[1:], num_classes)
cnn.compile(optimizer=Adam(learning_rate=learning_rate),
            loss='categorical_crossentropy', metrics=['accuracy'])
history_cnn = cnn.fit(X_train_cnn, y_train,
                      validation_data=(X_val_cnn, y_val),
                      epochs=epochs, batch_size=batch_size, verbose=2)
# Plot accuracy comparison
plt.figure(figsize=(12, 5))
plt.plot(history_dnn.history['val_accuracy'], label='DNN Validation Accuracy')
plt.plot(history_cnn.history['val_accuracy'], label='CNN Validation Accuracy')
plt.title('Validation Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
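The accuracy comparison can also be stated numerically at the end of the 20 epochs; an optional addition using the two histories above:
print(f"DNN final validation accuracy: {history_dnn.history['val_accuracy'][-1]:.4f}")
print(f"CNN final validation accuracy: {history_cnn.history['val_accuracy'][-1]:.4f}")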
Statement 20
Classify Apple leaf images using a CNN without data augmentation for 10 epochs.
dataset/
train/
class1/
class2/
...
validation/
class1/
class2/
...
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
# Paths to dataset directories (update these paths)
train_dir = 'dataset/train'
val_dir = 'dataset/validation'
# Image parameters
img_height, img_width = 150, 150
batch_size = 32
epochs = 10
learning_rate = 0.001
# Use ImageDataGenerator to load images WITHOUT augmentation (rescaling only)
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical'
)
num_classes = len(train_generator.class_indices)
# Build the CNN model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax')
])
model.compile(optimizer=Adam(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=epochs,
    verbose=2
)
# Plot training & validation accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 21
Implement a CNN on Tomato dataset using batch sizes of 32 and 64 separately. Keep the learning
rate fixed at 0.0001 and compare results.
tomato_dataset/
train/
class1/
class2/
validation/
class1/
class2/
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
# Update these paths to your dataset location
train_dir = 'tomato_dataset/train'
val_dir = 'tomato_dataset/validation'
img_height, img_width = 150, 150
learning_rate = 0.0001
epochs = 10
def create_data_generators(batch_size):
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True
    )
    val_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False
    )
    return train_generator, val_generator
def build_cnn_model(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(2, 2),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),
        Conv2D(128, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Train with batch size 32
batch_size_32 = 32
train_gen_32, val_gen_32 = create_data_generators(batch_size_32)
num_classes = len(train_gen_32.class_indices)
input_shape = (img_height, img_width, 3)
model_32 = build_cnn_model(input_shape, num_classes)
model_32.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
history_32 = model_32.fit(
    train_gen_32,
    validation_data=val_gen_32,
    epochs=epochs,
    verbose=2
)
# Train with batch size 64
batch_size_64 = 64
train_gen_64, val_gen_64 = create_data_generators(batch_size_64)
model_64 = build_cnn_model(input_shape, num_classes)
model_64.compile(optimizer=Adam(learning_rate=learning_rate),
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
history_64 = model_64.fit(
    train_gen_64,
    validation_data=val_gen_64,
    epochs=epochs,
    verbose=2
)
# Plot accuracy and loss comparison
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
plt.plot(history_32.history['val_accuracy'], label='Batch size 32')
plt.plot(history_64.history['val_accuracy'], label='Batch size 64')
plt.title('Validation Accuracy Comparison')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history_32.history['val_loss'], label='Batch size 32')
plt.plot(history_64.history['val_loss'], label='Batch size 64')
plt.title('Validation Loss Comparison')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 22
Implement CNNs using Adam and RMSprop optimizers with a learning rate of 0.001 on Peach
images. Record validation loss and accuracy.
peach_dataset/
train/
ripe/
img_001.jpg
img_002.jpg
...
unripe/
img_001.jpg
img_002.jpg
...
validation/
ripe/
img_101.jpg
img_102.jpg
...
unripe/
img_101.jpg
img_102.jpg
...
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.optimizers import Adam, RMSprop
import matplotlib.pyplot as plt
# Update these paths to your Peach dataset location
train_dir = 'peach_dataset/train'
val_dir = 'peach_dataset/validation'
img_height, img_width = 150, 150
learning_rate = 0.001
epochs = 10
batch_size = 32
def create_data_generators(batch_size):
    train_datagen = ImageDataGenerator(rescale=1./255)
    val_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True
    )
    val_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False
    )
    return train_generator, val_generator
def build_cnn_model(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(2, 2),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),
        Conv2D(128, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    return model
# Prepare data generators
train_gen, val_gen = create_data_generators(batch_size)
num_classes = len(train_gen.class_indices)
input_shape = (img_height, img_width, 3)
# Model with the Adam optimizer
model_adam = build_cnn_model(input_shape, num_classes)
model_adam.compile(optimizer=Adam(learning_rate=learning_rate),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
history_adam = model_adam.fit(
    train_gen,
    validation_data=val_gen,
    epochs=epochs,
    verbose=2
)
# Model with the RMSprop optimizer
model_rmsprop = build_cnn_model(input_shape, num_classes)
model_rmsprop.compile(optimizer=RMSprop(learning_rate=learning_rate),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
history_rmsprop = model_rmsprop.fit(
    train_gen,
    validation_data=val_gen,
    epochs=epochs,
    verbose=2
)
# Plot validation accuracy and loss for comparison
plt.figure(figsize=(14, 6))
plt.subplot(1, 2, 1)
plt.plot(history_adam.history['val_accuracy'], label='Adam')
plt.plot(history_rmsprop.history['val_accuracy'], label='RMSprop')
plt.title('Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history_adam.history['val_loss'], label='Adam')
plt.plot(history_rmsprop.history['val_loss'], label='RMSprop')
plt.title('Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
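Since the statement asks for the validation loss and accuracy to be recorded, the per-epoch values can also be written into a table. This is an optional sketch assuming pandas is installed; the output filename is arbitrary.
import pandas as pd
# Collect per-epoch validation metrics for both optimizers into one table
results = pd.DataFrame({
    'epoch': range(1, epochs + 1),
    'adam_val_loss': history_adam.history['val_loss'],
    'adam_val_accuracy': history_adam.history['val_accuracy'],
    'rmsprop_val_loss': history_rmsprop.history['val_loss'],
    'rmsprop_val_accuracy': history_rmsprop.history['val_accuracy'],
})
print(results)
results.to_csv('peach_optimizer_comparison.csv', index=False)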
Statement 23
Build and train a CNN model for Apple image classification that includes Dropout layers. Train
using 15 epochs and evaluate performance.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import matplotlib.pyplot as plt
# Set paths to your Apple image dataset folders
train_dir = 'apple_dataset/train'
val_dir = 'apple_dataset/validation'
img_height, img_width = 150, 150
batch_size = 32
epochs = 15
# Data generators with rescaling
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False
)
num_classes = len(train_generator.class_indices)
# Build the CNN model with Dropout
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, 3)),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),  # Dropout layer to reduce overfitting
    Dense(num_classes, activation='softmax')
])
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
history = model.fit(
    train_generator,
    validation_data=val_generator,
    epochs=epochs,
    verbose=2
)
# Evaluate performance on the validation set
val_loss, val_accuracy = model.evaluate(val_generator)
print(f'Validation Loss: {val_loss:.4f}')
print(f'Validation Accuracy: {val_accuracy:.4f}')
# Plot training & validation accuracy and loss
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Train Accuracy')
plt.plot(history.history['val_accuracy'], label='Val Accuracy')
plt.title('Training & Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training & Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 24
Split Grape image data into 70% train, 15% validation, and 15% test. Train a CNN for 10
epochs using a fixed learning rate of 0.001.
path_to_dataset/
class1/
img_001.jpg
img_002.jpg
class2/
img_001.jpg
...
import tensorflow as tf
from [Link] import layers, models
from [Link] import Adam
import [Link] as plt
import os
# Set dataset path
dataset_dir = 'path_to_dataset' # e.g. 'Grape_images/'
# Parameters
img_height, img_width = 150, 150
batch_size = 32
learning_rate = 0.001
epochs = 15
validation_split = 0.15 # for validation set
test_split = 0.15 # test set will be created separately below
# Load full dataset with validation split using image_dataset_from_directory
full_dataset = [Link].image_dataset_from_directory(
dataset_dir,
shuffle=True,
image_size=(img_height, img_width),
batch_size=batch_size,
validation_split=validation_split + test_split,
subset="training",
seed=123
)
val_test_dataset = [Link].image_dataset_from_directory(
dataset_dir,
shuffle=True,
image_size=(img_height, img_width),
batch_size=batch_size,
validation_split=validation_split + test_split,
subset="validation",
seed=123
)
# Split val_test_dataset into validation and test sets manually
val_batches = int(len(val_test_dataset)*validation_split/(validation_split + test_split))
val_dataset = val_test_dataset.take(val_batches)
test_dataset = val_test_dataset.skip(val_batches)
# Normalize pixel values to [0,1]
normalization_layer = [Link](1./255)
full_dataset = full_dataset.map(lambda x, y: (normalization_layer(x), y))
val_dataset = val_dataset.map(lambda x, y: (normalization_layer(x), y))
test_dataset = test_dataset.map(lambda x, y: (normalization_layer(x), y))
# Build CNN Model with Dropout
num_classes = len(full_dataset.class_names)
model = [Link]([
layers.Conv2D(32, (3,3), activation='relu', input_shape=(img_height, img_width, 3)),
layers.MaxPooling2D(2,2),
layers.Conv2D(64, (3,3), activation='relu'),
layers.MaxPooling2D(2,2),
layers.Conv2D(128, (3,3), activation='relu'),
layers.MaxPooling2D(2,2),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes, activation='softmax')
])
# Compile model
model.compile(
optimizer=Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
# Train model
history = model.fit(
full_dataset,
validation_data=val_dataset,
epochs=epochs
)
# Evaluate on test data
test_loss, test_acc = model.evaluate(test_dataset)
print(f'Test accuracy: {test_acc:.4f}')
print(f'Test loss: {test_loss:.4f}')
# Plot training & validation accuracy and loss
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Statement 25
Use LeNet architecture to classify the Cats and Dogs dataset, and plot training loss and
accuracy curves.
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
import os
# Set dataset path (assumes a directory structure like:
# cats_and_dogs/
#     cats/
#         cat001.jpg ...
#     dogs/
#         dog001.jpg ...)
dataset_dir = 'path_to_cats_and_dogs' # Change this path accordingly
# Parameters
img_height, img_width = 32, 32  # LeNet's input size is 32x32 grayscale; here we keep RGB and resize to 32x32
batch_size = 32
epochs = 15
learning_rate = 0.001
# Load dataset with 80/20 train-validation split
train_ds = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
val_ds = tf.keras.utils.image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
# Normalize pixel values to [0,1]
normalization_layer = tf.keras.layers.Rescaling(1./255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
# Define LeNet architecture
def LeNet():
    model = models.Sequential()
    model.add(layers.Conv2D(6, kernel_size=(5,5), activation='tanh',
                            input_shape=(img_height, img_width, 3), padding='same'))
    model.add(layers.AveragePooling2D())
    model.add(layers.Conv2D(16, kernel_size=(5,5), activation='tanh'))
    model.add(layers.AveragePooling2D())
    model.add(layers.Flatten())
    model.add(layers.Dense(120, activation='tanh'))
    model.add(layers.Dense(84, activation='tanh'))
    model.add(layers.Dense(2, activation='softmax'))  # 2 classes: cat, dog
    return model
model = LeNet()
# Compile model
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
# Train model
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Plot training & validation accuracy and loss
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
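A final check on the held-out split can be appended after the plots; a minimal sketch using the model and val_ds defined above:
val_loss, val_acc = model.evaluate(val_ds, verbose=0)
print(f"Validation accuracy: {val_acc:.4f}, validation loss: {val_loss:.4f}")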
Statement 26
Use the MobileNet architecture to perform transfer learning on the Cats and Dogs dataset, and
evaluate model performance using a classification report.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.utils import image_dataset_from_directory
from sklearn.metrics import classification_report
import numpy as np
import os
# Dataset directory structure (example):
# cats_and_dogs/
#     cats/
#         cat001.jpg
#         ...
#     dogs/
#         dog001.jpg
#         ...
dataset_dir = 'path_to_cats_and_dogs' # Change to your actual path
# Parameters
img_height, img_width = 224, 224 # MobileNet default input size
batch_size = 32
epochs = 10
learning_rate = 0.0001
# Load datasets with 80/20 split
train_ds = image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
val_ds = image_dataset_from_directory(
dataset_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size
)
# Capture class names before mapping (map()/cache() return datasets without class_names)
class_names = train_ds.class_names
# Preprocess input for MobileNet
train_ds = train_ds.map(lambda x, y: (preprocess_input(x), y))
val_ds = val_ds.map(lambda x, y: (preprocess_input(x), y))
# Cache and prefetch for performance optimization
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Load MobileNet base model with pretrained weights, exclude top layers
base_model = MobileNet(input_shape=(img_height, img_width, 3),
include_top=False,
weights='imagenet')
base_model.trainable = False # Freeze base model layers initially
# Add classification head
model = models.Sequential([
base_model,
layers.GlobalAveragePooling2D(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.3),
layers.Dense(2, activation='softmax') # 2 classes: cats and dogs
])
# Compile model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Train the top layers first
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)
# Optional: Fine-tune some base model layers
base_model.trainable = True
# Fine-tune from this layer onwards
fine_tune_at = 100
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False
# Recompile with lower learning rate for fine-tuning
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate/10),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Continue training
fine_tune_epochs = 5
total_epochs = epochs + fine_tune_epochs
history_fine = model.fit(train_ds,
validation_data=val_ds,
epochs=total_epochs,
initial_epoch=history.epoch[-1])
# Evaluate on validation set and print classification report
# Extract true labels and predictions
y_true = []
y_pred = []
for images, labels in val_ds:
    preds = model.predict(images, verbose=0)
    y_true.extend(labels.numpy())
    y_pred.extend(np.argmax(preds, axis=1))
print("Classification Report on Validation Set:")
print(classification_report(y_true, y_pred, target_names=class_names))
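For a fuller picture of per-class errors, a confusion matrix can be printed alongside the report; a minimal sketch reusing y_true and y_pred from above (the confusion_matrix import is an addition, not part of the original listing):
from sklearn.metrics import confusion_matrix
print("Confusion Matrix:")
print(confusion_matrix(y_true, y_pred))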
Statement 27
Build both CNN and DNN models for the CIFAR-10 dataset, and compare their accuracy and loss.
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
# Load CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
# Normalize pixel values to [0,1]
x_train, x_test = x_train / 255.0, x_test / 255.0
# Flatten labels
y_train = y_train.flatten()
y_test = y_test.flatten()
num_classes = 10
# Build CNN model
def build_cnn():
    model = models.Sequential([
        layers.Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)),
        layers.MaxPooling2D(2,2),
        layers.Conv2D(64, (3,3), activation='relu'),
        layers.MaxPooling2D(2,2),
        layers.Conv2D(128, (3,3), activation='relu'),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(num_classes, activation='softmax')
    ])
    return model
# Build DNN model
def build_dnn():
    model = models.Sequential([
        layers.Flatten(input_shape=(32,32,3)),
        layers.Dense(512, activation='relu'),
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(num_classes, activation='softmax')
    ])
    return model
# Compile and train model helper
def compile_and_train(model, epochs=15):
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train,
                        validation_data=(x_test, y_test),
                        epochs=epochs,
                        batch_size=64,
                        verbose=2)
    return history
# Train CNN
cnn_model = build_cnn()
print("Training CNN model...")
cnn_history = compile_and_train(cnn_model)
# Train DNN
dnn_model = build_dnn()
print("\nTraining DNN model...")
dnn_history = compile_and_train(dnn_model)
# Plot accuracy and loss comparison
plt.figure(figsize=(12,5))
# Accuracy plot
plt.subplot(1,2,1)
plt.plot(cnn_history.history['accuracy'], label='CNN Train Acc')
plt.plot(cnn_history.history['val_accuracy'], label='CNN Val Acc')
plt.plot(dnn_history.history['accuracy'], label='DNN Train Acc')
plt.plot(dnn_history.history['val_accuracy'], label='DNN Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# Loss plot
plt.subplot(1,2,2)
plt.plot(cnn_history.history['loss'], label='CNN Train Loss')
plt.plot(cnn_history.history['val_loss'], label='CNN Val Loss')
plt.plot(dnn_history.history['loss'], label='DNN Train Loss')
plt.plot(dnn_history.history['val_loss'], label='DNN Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
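To compare the two models numerically rather than only from the curves, the final test metrics can be printed; a minimal sketch assuming cnn_model and dnn_model as trained above:
cnn_loss, cnn_acc = cnn_model.evaluate(x_test, y_test, verbose=0)
dnn_loss, dnn_acc = dnn_model.evaluate(x_test, y_test, verbose=0)
print(f"CNN - test accuracy: {cnn_acc:.4f}, test loss: {cnn_loss:.4f}")
print(f"DNN - test accuracy: {dnn_acc:.4f}, test loss: {dnn_loss:.4f}")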
Statement 28
Implement an RNN on the GOOGL stock-price dataset and compare its training time and loss curve
with an LSTM model.
import pandas as pd
import numpy as np
import [Link] as plt
import time
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, LSTM, Dense
from tensorflow.keras.optimizers import Adam
# Load the stock-price dataset (make sure the CSV is in the working directory)
data = pd.read_csv('GOOGL.csv')  # assumed GOOGL prices, as in Statement 31; adjust to your file
# Assuming dataset has 'Date' and 'Close' columns; focus on 'Close' prices
close_prices = data['Close'].values.reshape(-1, 1)
# Normalize prices between 0 and 1
scaler = MinMaxScaler()
scaled_close = scaler.fit_transform(close_prices)
# Prepare time series sequences
def create_sequences(data, seq_length=20):
    X, y = [], []
    for i in range(len(data) - seq_length):
        X.append(data[i:i+seq_length])
        y.append(data[i+seq_length])
    return np.array(X), np.array(y)
SEQ_LENGTH = 20
X, y = create_sequences(scaled_close, SEQ_LENGTH)
# Split into train and test sets (e.g., 80%-20%)
split = int(0.8 * len(X))
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
# Build Simple RNN model
def build_rnn():
    model = Sequential([
        SimpleRNN(50, activation='tanh', input_shape=(SEQ_LENGTH, 1)),
        Dense(1)
    ])
    model.compile(optimizer=Adam(), loss='mse')
    return model
# Build LSTM model
def build_lstm():
    model = Sequential([
        LSTM(50, activation='tanh', input_shape=(SEQ_LENGTH, 1)),
        Dense(1)
    ])
    model.compile(optimizer=Adam(), loss='mse')
    return model
# Train and record time + loss history
def train_model(model, epochs=30, batch_size=32):
    start_time = time.time()
    history = model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
                        validation_data=(X_test, y_test), verbose=2)
    end_time = time.time()
    training_time = end_time - start_time
    return history, training_time
# Prepare data shape (samples, seq_length, features)
X_train = X_train.reshape((X_train.shape[0], SEQ_LENGTH, 1))
X_test = X_test.reshape((X_test.shape[0], SEQ_LENGTH, 1))
# Train RNN
print("Training Simple RNN model...")
rnn_model = build_rnn()
rnn_history, rnn_time = train_model(rnn_model)
# Train LSTM
print("\nTraining LSTM model...")
lstm_model = build_lstm()
lstm_history, lstm_time = train_model(lstm_model)
# Plot loss curves
plt.figure(figsize=(10,5))
plt.plot(rnn_history.history['loss'], label='RNN Train Loss')
plt.plot(rnn_history.history['val_loss'], label='RNN Val Loss')
plt.plot(lstm_history.history['loss'], label='LSTM Train Loss')
plt.plot(lstm_history.history['val_loss'], label='LSTM Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss (MSE)')
plt.legend()
plt.show()
# Print training time comparison
print(f"Simple RNN Training Time: {rnn_time:.2f} seconds")
print(f"LSTM Training Time: {lstm_time:.2f} seconds")
Statement 29
Use transfer learning with VGG16 on the Cats and Dogs dataset, freezing the first 4 layers; train
the classifier and evaluate model performance using a classification report.
cats_and_dogs/
├── train/
│ ├── cats/
│ └── dogs/
├── validation/
│ ├── cats/
│ └── dogs/
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
import os
# Define paths (adjust to your dataset folder structure)
base_dir = 'cats_and_dogs' # folder containing 'train' and 'validation' subfolders
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'validation')
# Parameters
IMG_SIZE = (224, 224)
BATCH_SIZE = 32
EPOCHS = 10
LEARNING_RATE = 0.0001
# Data Generators
train_datagen = ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=15,
zoom_range=0.1
)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='binary'
)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='binary',
shuffle=False
)
# Load VGG16 base model without top layers
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(*IMG_SIZE, 3))
# Freeze first 4 layers
for layer in base_model.layers[:4]:
    layer.trainable = False
for layer in base_model.layers[4:]:
    layer.trainable = True
# Add custom classification head
x = base_model.output
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(1, activation='sigmoid')(x) # Binary classification
model = Model(inputs=base_model.input, outputs=output)
# Compile model
model.compile(optimizer=Adam(learning_rate=LEARNING_RATE),
loss='binary_crossentropy',
metrics=['accuracy'])
# Train model
history = model.fit(
train_generator,
epochs=EPOCHS,
validation_data=val_generator
)
# Predict on validation data
val_generator.reset()
preds = model.predict(val_generator)
predicted_classes = (preds > 0.5).astype(int).reshape(-1)
# True classes
true_classes = val_generator.classes
# Classification report
target_names = list(train_generator.class_indices.keys())
print(classification_report(true_classes, predicted_classes, target_names=target_names))
# Optionally print confusion matrix
print("Confusion Matrix:")
print(confusion_matrix(true_classes, predicted_classes))
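Note that VGG16's ImageNet weights were trained with the network's own preprocessing rather than plain 1/255 rescaling; if transfer-learning accuracy is disappointing, one alternative sketch (an addition, not part of the original listing) is to build the generators with preprocessing_function instead of rescale:
from tensorflow.keras.applications.vgg16 import preprocess_input
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input,
                                   horizontal_flip=True, rotation_range=15, zoom_range=0.1)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)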
Statement 30
Load and visualize sample images from the Potato dataset, and train a CNN for 5 epochs.
potato_dataset/
├── class1/
├── class2/
└── ...
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.models import Sequential
import os
import numpy as np
# Define dataset path - update this to your local Potato dataset directory
dataset_dir = 'potato_dataset' # should contain subfolders for each class
# Parameters
IMG_SIZE = (128, 128)
BATCH_SIZE = 32
EPOCHS = 5
# Single generator with rescaling only; validation_split gives an 80/20 train/validation split
train_datagen = ImageDataGenerator(
rescale=1./255,
validation_split=0.2
)
train_generator = train_datagen.flow_from_directory(
dataset_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='training',
shuffle=True
)
val_generator = train_datagen.flow_from_directory(
dataset_dir,
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
subset='validation',
shuffle=False
)
# Visualize some sample images
def plot_sample_images(generator):
    images, labels = next(generator)  # batch of images and labels
    class_indices = {v: k for k, v in generator.class_indices.items()}
    plt.figure(figsize=(10, 10))
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.imshow(images[i])
        label_index = np.argmax(labels[i])
        plt.title(class_indices[label_index])
        plt.axis('off')
    plt.show()
plot_sample_images(train_generator)
# Build simple CNN model
model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=(*IMG_SIZE, 3)),
MaxPooling2D(2, 2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2, 2),
Conv2D(128, (3,3), activation='relu'),
MaxPooling2D(2, 2),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(train_generator.num_classes, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Train model
history = model.fit(
train_generator,
epochs=EPOCHS,
validation_data=val_generator
)
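Since the task only trains for 5 epochs, a quick evaluation on the validation generator rounds out the run; a minimal sketch using the objects defined above:
val_loss, val_acc = model.evaluate(val_generator, verbose=0)
print(f"Validation accuracy after {EPOCHS} epochs: {val_acc:.4f}")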
Statement 31
Implement LSTM models on GOOGL.csv with learning rates 0.001 and 0.0001 for 20 and 50
epochs. Compare accuracy and convergence.
your_project_folder/
├── GOOGL.csv
├── lstm_googl.py # (your code file, if saving separately)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# Load dataset
df = pd.read_csv('GOOGL.csv')  # Ensure the file is in your working directory
df = df[['Date', 'Close']]
df['Date'] = pd.to_datetime(df['Date'])
df.sort_values('Date', inplace=True)
df.reset_index(drop=True, inplace=True)
# Normalize closing prices
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(df[['Close']])
# Prepare sequences for LSTM
def create_sequences(data, time_steps=60):
    X, y = [], []
    for i in range(time_steps, len(data)):
        X.append(data[i - time_steps:i])
        y.append(data[i])
    return np.array(X), np.array(y)
time_steps = 60
X, y = create_sequences(scaled_data, time_steps)
# Split into train/test
train_size = int(len(X) * 0.8)
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# Define function to build LSTM model
def build_model(learning_rate):
    model = Sequential([
        LSTM(50, return_sequences=True, input_shape=(X_train.shape[1], 1)),
        Dropout(0.2),
        LSTM(50),
        Dropout(0.2),
        Dense(1)
    ])
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['mae'])
    return model
# Train models with different settings
configs = [
{'lr': 0.001, 'epochs': 20},
{'lr': 0.0001, 'epochs': 50}
]
histories = []
for cfg in configs:
    print(f"\nTraining with LR={cfg['lr']} for {cfg['epochs']} epochs")
    model = build_model(cfg['lr'])
    history = model.fit(
        X_train, y_train,
        epochs=cfg['epochs'],
        batch_size=32,
        validation_data=(X_test, y_test),
        verbose=1
    )
    histories.append((cfg, history))
# Plot training and validation loss
for cfg, history in histories:
    plt.plot(history.history['val_loss'], label=f"Val Loss (LR={cfg['lr']}, E={cfg['epochs']})")
    plt.plot(history.history['loss'], linestyle='--', label=f"Train Loss (LR={cfg['lr']}, E={cfg['epochs']})")
plt.title("LSTM Loss Comparison")
plt.xlabel("Epochs")
plt.ylabel("Loss (MSE)")
plt.legend()
plt.grid(True)
plt.show()
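The mean_squared_error import above is only useful if the predictions are actually scored; a minimal sketch that reports test RMSE in the original price scale for the last model trained in the loop (an addition, not part of the original listing):
preds = model.predict(X_test)
rmse = np.sqrt(mean_squared_error(scaler.inverse_transform(y_test),
                                  scaler.inverse_transform(preds)))
print(f"Test RMSE (last configuration, original price units): {rmse:.2f}")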
Statement 32
Implement a CNN on the Tomato dataset using batch sizes of 32 and 64 separately. Keep the
learning rate fixed at 0.0001 and compare results.
project_folder/
├── tomato_data/
│ ├── train/
│ │ ├── class1/
│ │ └── class2/
│ ├── val/
│ │ ├── class1/
│ │ └── class2/
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Constants
IMG_SIZE = (128, 128)
EPOCHS = 10
LEARNING_RATE = 0.0001
DATA_DIR = 'tomato_data' # Adjust path if needed
# Data preparation
datagen = ImageDataGenerator(rescale=1./255)
train_batches = {}
val_batches = {}
for batch_size in [32, 64]:
    train_batches[batch_size] = datagen.flow_from_directory(
        DATA_DIR + '/train',
        target_size=IMG_SIZE,
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=True
    )
    val_batches[batch_size] = datagen.flow_from_directory(
        DATA_DIR + '/val',
        target_size=IMG_SIZE,
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False
    )
# Define CNN model builder
def build_model(input_shape, num_classes):
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
        MaxPooling2D(2, 2),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(2, 2),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax')
    ])
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# Train and evaluate for both batch sizes
histories = {}
for batch_size in [32, 64]:
    print(f"\nTraining with batch size {batch_size}")
    model = build_model((IMG_SIZE[0], IMG_SIZE[1], 3), train_batches[batch_size].num_classes)
    history = model.fit(
        train_batches[batch_size],
        validation_data=val_batches[batch_size],
        epochs=EPOCHS,
        verbose=1
    )
    histories[batch_size] = history
# Plot results
for metric in ['loss', 'accuracy']:
    plt.figure(figsize=(8, 5))
    for batch_size in [32, 64]:
        plt.plot(histories[batch_size].history[metric], label=f'Train {metric} (BS={batch_size})')
        plt.plot(histories[batch_size].history['val_' + metric], linestyle='--',
                 label=f'Val {metric} (BS={batch_size})')
    plt.title(f'CNN {metric.capitalize()} Comparison')
    plt.xlabel('Epochs')
    plt.ylabel(metric.capitalize())
    plt.legend()
    plt.grid(True)
    plt.show()
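A one-line summary per batch size makes the comparison easier to read than the plots alone; a minimal sketch using the histories dictionary above:
for batch_size in [32, 64]:
    print(f"Batch size {batch_size}: final val accuracy = "
          f"{histories[batch_size].history['val_accuracy'][-1]:.4f}")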
Statement 34
Implement a CNN model on Potato leaf images using the Adam optimizer with a learning rate of
0.01, and evaluate model performance.
project_folder/
├── potato_data/
│ ├── train/
│ │ ├── class1/
│ │ └── class2/
│ └── val/
│ ├── class1/
│ └── class2/
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
# Constants
IMG_SIZE = (128, 128)
BATCH_SIZE = 32
EPOCHS = 10
LEARNING_RATE = 0.01  # learning rate specified in the statement
DATA_DIR = 'potato_data' # change to your dataset path
# Image preprocessing
datagen = ImageDataGenerator(rescale=1./255)
train_gen = datagen.flow_from_directory(
DATA_DIR + '/train',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)
val_gen = datagen.flow_from_directory(
DATA_DIR + '/val',
target_size=IMG_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=False
)
# Build CNN model
model = Sequential([
Conv2D(32, (3,3), activation='relu', input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3)),
MaxPooling2D(2,2),
Conv2D(64, (3,3), activation='relu'),
MaxPooling2D(2,2),
Flatten(),
Dense(128, activation='relu'),
Dropout(0.5),
Dense(train_gen.num_classes, activation='softmax')
])
# Compile model with Adam optimizer
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE),
loss='categorical_crossentropy',
metrics=['accuracy']
)
# Train the model
history = model.fit(
train_gen,
validation_data=val_gen,
epochs=EPOCHS,
verbose=1
)
# Evaluate model performance
val_loss, val_acc = model.evaluate(val_gen)
print(f"\nValidation Loss: {val_loss:.4f}")
print(f"Validation Accuracy: {val_acc:.4f}")
# Classification Report
val_gen.reset()
y_pred = model.predict(val_gen)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = val_gen.classes
class_labels = list(val_gen.class_indices.keys())
print("\nClassification Report:")
print(classification_report(y_true, y_pred_classes, target_names=class_labels))
# Plotting training curves
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.show()
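The confusion_matrix import above can also be put to use for a per-class error breakdown; a minimal sketch reusing y_true and y_pred_classes from the report step:
print("\nConfusion Matrix:")
print(confusion_matrix(y_true, y_pred_classes))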
Statement 35
Build a Deep Neural Network for Fashion MNIST Classification
● Load Fashion MNIST dataset
● Preprocess the data using standardization
● Define a feed-forward neural network with 3 Dense layers
● Use RMSprop optimizer and categorical crossentropy loss
● Train the model for 15 epochs and evaluate performance
● Plot the training and validation curves
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load Fashion MNIST dataset
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# Standardize pixel values (zero mean, unit variance) using training-set statistics
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
mean, std = X_train.mean(), X_train.std()
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# One-hot encoding of labels
y_train_cat = to_categorical(y_train, 10)
y_test_cat = to_categorical(y_test, 10)
# Define the model
model = Sequential([
Flatten(input_shape=(28, 28)),
Dense(128, activation='relu'),
Dense(64, activation='relu'),
Dense(10, activation='softmax')
])
# Compile the model
model.compile(
optimizer=RMSprop(learning_rate=0.001),
loss='categorical_crossentropy',
metrics=['accuracy']
)
# Train the model
history = model.fit(
X_train, y_train_cat,
validation_data=(X_test, y_test_cat),
epochs=15,
batch_size=32,
verbose=1
)
# Evaluate the model
loss, accuracy = model.evaluate(X_test, y_test_cat)
print(f"\nTest Accuracy: {accuracy:.4f}")
print(f"Test Loss: {loss:.4f}")
# Plot training and validation curves
plt.figure(figsize=(12,5))
# Accuracy plot
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'], label='Train Acc')
plt.plot(history.history['val_accuracy'], label='Val Acc')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# Loss plot
plt.subplot(1,2,2)
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Val Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.show()
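To see the trained classifier in action, a few test images can be labelled with the standard Fashion MNIST class names; a minimal sketch (the class_names list and numpy import are additions to the original listing):
import numpy as np
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
preds = np.argmax(model.predict(X_test[:5]), axis=1)
for i, p in enumerate(preds):
    print(f"Sample {i}: predicted = {class_names[p]}, actual = {class_names[y_test[i]]}")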