Practical - 5
Write a program to build an auto-encoder in Keras.
Code:-
# Build a simple fully-connected autoencoder in Keras, then reuse its
# encoder and decoder halves separately.
# NOTE(review): the extracted text replaced dotted names with "[Link]";
# the imports and calls below are the standard Keras ones this tutorial
# pattern uses — confirm against the original listing.
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

# Generate some random data for demonstration
data = np.random.rand(1000, 50)

# Define the dimensions of the input and encoding layers
input_dim = data.shape[1]
encoding_dim = 10  # Choose an arbitrary size for the encoding layer

# Define the input layer
input_layer = Input(shape=(input_dim,))
# Define the encoding layer
encoded = Dense(encoding_dim, activation='relu')(input_layer)
# Define the decoding layer
decoded = Dense(input_dim, activation='sigmoid')(encoded)

# Create the autoencoder model (input -> encoding -> reconstruction)
autoencoder = Model(input_layer, decoded)
# Compile the model; binary cross-entropy pairs with the sigmoid output
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Train the autoencoder: the target is the input itself
autoencoder.fit(data, data, epochs=50, batch_size=32, shuffle=True)

# Once trained, use the encoder part to get the encoded
# representation of the input data
encoder = Model(input_layer, encoded)
encoded_data = encoder.predict(data)

# Use the decoder part to reconstruct the input data from the
# encoded representation: reuse the trained last layer on a new input
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(encoded_input, decoder_layer(encoded_input))
reconstructed_data = decoder.predict(encoded_data)
Output:-
Practical - 6
Write a program to implement a basic reinforcement learning algorithm to teach a
bot to reach its destination.
Code:-
import numpy as np

# Q-learning on a small deterministic grid world: the agent starts at
# START_STATE and must learn a policy that reaches END_STATE.

# --- Grid-world definition -------------------------------------------------
GRID_SIZE = 5
START_STATE = (0, 0)
END_STATE = (4, 4)

# Available actions
ACTIONS = ['UP', 'DOWN', 'LEFT', 'RIGHT']

# Rewards / penalties per state.
# FIX: the original keyed the destination reward on (3, 4) while END_STATE
# is (4, 4), so reaching the goal was never rewarded and the greedy policy
# need not converge toward it; the reward is now keyed on END_STATE.
REWARDS = {
    END_STATE: 100,  # Reward for reaching the destination
    (1, 1): -10,     # Penalty for entering a specific state
    (2, 2): -5       # Penalty for entering a specific state
}

# Q-table: one value per (row, col, action)
Q_values = np.zeros((GRID_SIZE, GRID_SIZE, len(ACTIONS)))

# Learning hyper-parameters
LEARNING_RATE = 0.1
DISCOUNT_FACTOR = 0.9
EPISODES = 1000
EPSILON = 0.1  # exploration probability for epsilon-greedy

np.random.seed(0)  # reproducible training runs for a demo script


def _move(state, action):
    """Return the state reached by taking `action`, clipped to the grid."""
    row, col = state
    if action == 'UP':
        return (max(row - 1, 0), col)
    if action == 'DOWN':
        return (min(row + 1, GRID_SIZE - 1), col)
    if action == 'LEFT':
        return (row, max(col - 1, 0))
    return (row, min(col + 1, GRID_SIZE - 1))  # RIGHT


# Function to choose an action using the epsilon-greedy policy
def choose_action(state):
    """Explore with probability EPSILON, otherwise exploit the Q-table."""
    if np.random.uniform(0, 1) < EPSILON:
        return np.random.choice(ACTIONS)
    return ACTIONS[np.argmax(Q_values[state[0]][state[1]])]


# Function to update Q-values using Q-learning
def update_Q_values(state, action, reward, next_state):
    """One-step off-policy TD update toward reward + gamma * max_a' Q(s', a')."""
    max_next_reward = np.max(Q_values[next_state[0]][next_state[1]])
    Q_values[state[0]][state[1]][ACTIONS.index(action)] += \
        LEARNING_RATE * (reward + DISCOUNT_FACTOR * max_next_reward -
                         Q_values[state[0]][state[1]][ACTIONS.index(action)])


# Function to perform one episode of training
def run_episode():
    """Walk from START_STATE to END_STATE, updating Q-values each step."""
    state = START_STATE
    while state != END_STATE:
        action = choose_action(state)
        next_state = _move(state, action)
        reward = REWARDS.get(next_state, 0)
        update_Q_values(state, action, reward, next_state)
        state = next_state


# Train the agent
for _ in range(EPISODES):
    run_episode()


# Function to get the optimal (greedy) path
def get_optimal_path():
    """Follow the greedy policy from START_STATE.

    Returns a list of (state, action) pairs ending with (state, 'GOAL').
    A step cap guards against an unconverged Q-table producing a cycle.
    """
    path = []
    state = START_STATE
    for _ in range(GRID_SIZE * GRID_SIZE):
        if state == END_STATE:
            break
        action = ACTIONS[np.argmax(Q_values[state[0]][state[1]])]
        path.append((state, action))
        state = _move(state, action)
    path.append((state, 'GOAL'))
    return path


# Print the optimal path
optimal_path = get_optimal_path()
for step in optimal_path:
    print(step)
Practical - 7
(a) Write a program to implement a recurrent neural network.
Code:-
import numpy as np
# Define the sigmoid activation function
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works element-wise on arrays."""
    return 1 / (1 + np.exp(-x))
# Define the RNN class
class RNN:
    """Minimal vanilla (Elman) RNN: tanh hidden recurrence, linear readout.

    Weights are drawn from a standard normal; biases start at zero.
    NOTE(review): the extracted source lost the activation name — tanh is
    the conventional choice for this tutorial pattern; confirm if possible.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # Input-to-hidden, hidden-to-hidden, hidden-to-output weights
        self.Wxh = np.random.randn(hidden_size, input_size)
        self.Whh = np.random.randn(hidden_size, hidden_size)
        self.Why = np.random.randn(output_size, hidden_size)
        # Hidden and output biases
        self.bh = np.zeros((hidden_size, 1))
        self.by = np.zeros((output_size, 1))

    def forward(self, inputs):
        """Run the sequence through the recurrence; return the final output.

        `inputs` is an iterable of (input_size, 1) column vectors; the
        return value has shape (output_size, 1).
        """
        h = np.zeros((self.hidden_size, 1))
        for x in inputs:
            h = np.tanh(np.dot(self.Wxh, x) + np.dot(self.Whh, h) +
                        self.bh)
        output = np.dot(self.Why, h) + self.by
        return output
# Example usage: push a 5-step sequence of random column vectors through
# the RNN and print the final (output_size, 1) output.
input_size = 3
hidden_size = 4
output_size = 2
rnn = RNN(input_size, hidden_size, output_size)
inputs = [np.random.randn(input_size, 1) for _ in range(5)]
output = rnn.forward(inputs)
print(output)
(b) Write a program to implement LSTM and perform time series
analysis using LSTM.
Code:-
# Time-series analysis with a Keras LSTM.
# NOTE(review): the extraction replaced dotted module paths with "[Link]";
# the standard Keras imports for this pattern are restored below.
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense

# Generate some random data for demonstration: a (1000, 1) series
data = np.random.rand(1000, 1)
# Prepare the data for LSTM: slide a window of n_steps over the series
def prepare_data(data, n_steps):
    """Split a (T, 1) series into supervised windows.

    Returns X of shape (samples, n_steps) — each row is n_steps consecutive
    values — and y of shape (samples,) holding the value that follows each
    window. Stops once a window's target would run past the end of `data`.
    """
    X, y = [], []
    for i in range(len(data)):
        end_ix = i + n_steps
        if end_ix > len(data) - 1:
            break
        X.append(data[i:end_ix, 0])
        y.append(data[end_ix, 0])
    return np.array(X), np.array(y)
n_steps = 3
X, y = prepare_data(data, n_steps)
# Reshape data for LSTM input: [samples, timesteps, features]
X = X.reshape(X.shape[0], X.shape[1], 1)

# Define the LSTM model: one 50-unit LSTM layer into a scalar output
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

# Fit the model
model.fit(X, y, epochs=200, verbose=0)

# Make predictions on the training windows
predictions = model.predict(X, verbose=0)