#include <iostream>
#include <unordered_map>
#include <unordered_set>
#include <vector>
using namespace std;

class Graph {
public:
    unordered_map<int, vector<int>> adjList;

    // Add an undirected edge between u and v
    void addEdge(int u, int v) { adjList[u].push_back(v); adjList[v].push_back(u); }

    // Recursive depth-first traversal, printing each vertex the first time it is reached
    void dfs(int node, unordered_set<int>& visited) {
        visited.insert(node);
        cout << node << " ";
        for (int next : adjList[node])
            if (!visited.count(next)) dfs(next, visited);
    }

    // Convenience overload: start a fresh traversal from 'start'
    void dfs(int start) { unordered_set<int> visited; dfs(start, visited); cout << "\n"; }
};
int main() {
Graph g;
g.addEdge(0, 1);
g.addEdge(0, 2);
g.addEdge(1, 3);
g.addEdge(1, 4);
g.addEdge(2, 5);
g.addEdge(2, 6);
g.dfs(0);
return 0;
}
#include <iostream>
#include <algorithm>
#include <utility>
using namespace std;

#define COMPUTER 1
#define HUMAN 2
#define SIDE 3 // Length of the board
#define COMPUTERMOVE 'O'
#define HUMANMOVE 'X'
// Minimax function
int minimax(char board[SIDE][SIDE], int depth, bool isAI) {
    int score = evaluate(board);
    // Terminal positions: a side has already won, or no empty cells remain (draw)
    if (score == 10 || score == -10) return score;
    if (!isMovesLeft(board)) return 0;
if (isAI) {
int best = -1000;
for (int i = 0; i < SIDE; i++) {
for (int j = 0; j < SIDE; j++) {
if (board[i][j] == ' ') {
board[i][j] = COMPUTERMOVE;
best = max(best, minimax(board, depth + 1, !isAI));
board[i][j] = ' ';
}
}
}
return best;
} else {
int best = 1000;
for (int i = 0; i < SIDE; i++) {
for (int j = 0; j < SIDE; j++) {
if (board[i][j] == ' ') {
board[i][j] = HUMANMOVE;
best = min(best, minimax(board, depth + 1, !isAI));
board[i][j] = ' ';
}
}
}
return best;
}
}
// Play one full game of Tic-Tac-Toe; 'whoseTurn' decides who moves first
void playTicTacToe(int whoseTurn) {
    // Start from an empty board
    char board[SIDE][SIDE];
    for (int i = 0; i < SIDE; i++)
        for (int j = 0; j < SIDE; j++)
            board[i][j] = ' ';

    int moveCount = 0;
while (isMovesLeft(board)) {
showBoard(board);
if (whoseTurn == HUMAN) {
int move;
cout << "Enter your move (1-9): ";
cin >> move;
int x = (move - 1) / SIDE, y = (move - 1) % SIDE;
if (board[x][y] == ' ') {
board[x][y] = HUMANMOVE;
moveCount++;
if (evaluate(board) == -10) {
showBoard(board);
declareWinner(HUMAN);
return;
}
whoseTurn = COMPUTER;
} else {
cout << "Invalid move! Try again.\n";
}
} else {
pair<int, int> bestMove = findBestMove(board);
board[bestMove.first][bestMove.second] = COMPUTERMOVE;
moveCount++;
if (evaluate(board) == 10) {
showBoard(board);
declareWinner(COMPUTER);
return;
}
whoseTurn = HUMAN;
}
}
showBoard(board);
cout << "It's a tie!\n";
}
// Driver code
int main() {
cout << "Welcome to Tic-Tac-Toe!\n";
char playAgain;
do {
playTicTacToe(HUMAN);
cout << "Do you want to play again? (y/n): ";
cin >> playAgain;
} while (playAgain == 'y' || playAgain == 'Y');
return 0;
}
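The game loop above calls several helpers (evaluate, isMovesLeft, showBoard, declareWinner, and findBestMove) whose definitions are not reproduced in this extract. A minimal sketch of the scoring and move-selection helpers, assuming the +10 / -10 convention used by the loop (the originals may differ in detail), could look like:

// Score the position: +10 if the computer has three in a row, -10 if the human has, 0 otherwise
int evaluate(char board[SIDE][SIDE]) {
    for (int i = 0; i < SIDE; i++) {
        // Rows and columns
        if (board[i][0] != ' ' && board[i][0] == board[i][1] && board[i][1] == board[i][2])
            return (board[i][0] == COMPUTERMOVE) ? 10 : -10;
        if (board[0][i] != ' ' && board[0][i] == board[1][i] && board[1][i] == board[2][i])
            return (board[0][i] == COMPUTERMOVE) ? 10 : -10;
    }
    // Diagonals (both pass through the centre cell)
    if (board[1][1] != ' ' &&
        ((board[0][0] == board[1][1] && board[1][1] == board[2][2]) ||
         (board[0][2] == board[1][1] && board[1][1] == board[2][0])))
        return (board[1][1] == COMPUTERMOVE) ? 10 : -10;
    return 0;
}

// True while at least one empty cell remains
bool isMovesLeft(char board[SIDE][SIDE]) {
    for (int i = 0; i < SIDE; i++)
        for (int j = 0; j < SIDE; j++)
            if (board[i][j] == ' ') return true;
    return false;
}

// Try every empty cell as the computer's move and keep the one minimax rates highest
pair<int, int> findBestMove(char board[SIDE][SIDE]) {
    int bestVal = -1000;
    pair<int, int> bestMove = {-1, -1};
    for (int i = 0; i < SIDE; i++) {
        for (int j = 0; j < SIDE; j++) {
            if (board[i][j] == ' ') {
                board[i][j] = COMPUTERMOVE;
                int moveVal = minimax(board, 0, false); // the human (minimiser) replies next
                board[i][j] = ' ';
                if (moveVal > bestVal) { bestVal = moveVal; bestMove = {i, j}; }
            }
        }
    }
    return bestMove;
}

In the complete program, evaluate and isMovesLeft would be defined (or declared) before minimax, and findBestMove after it; showBoard and declareWinner are straightforward printing routines.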
PRACTICAL:-04
Aim:- To implement a simple Bayesian network in C++, representing nodes, their parents, and conditional probability tables, and to display the network and perform inference on it.
#include <iostream>
#include <vector>
#include <string>
#include <unordered_map>
#include <set>
class Node {
public:
    std::string name;
    std::set<Node*> parents; // Parents of the node
    std::unordered_map<std::string, double> probabilities; // Conditional probabilities

    Node(const std::string& n) : name(n) {}

    // Register another node as a parent of this one
    void addParent(Node* parent) { parents.insert(parent); }

    // Store a conditional probability entry, keyed by the conditioning state
    void setProbability(const std::string& condition, double p) { probabilities[condition] = p; }
void display() {
std::cout << "Node: " << name << "\n";
std::cout << "Parents: ";
for (auto parent : parents) {
std::cout << parent->name << " ";
}
std::cout << "\nProbabilities:\n";
for (const auto& prob : probabilities) {
std::cout << " P(" << name << "|" << prob.first << ") = " << prob.second << "\n";
}
}
};
class BayesianNetwork {
public:
std::vector<Node*> nodes;
return probability;
}
void display() {
for (auto node : nodes) {
node->display();
std::cout << "\n";
}
}
};
int main() {
// Create nodes
Node* rain = new Node("Rain");
Node* traffic = new Node("Traffic");
Node* accident = new Node("Accident");
// Define relationships
traffic->addParent(rain);
accident->addParent(traffic);
// Set probabilities (conditional tables, keyed by the conditioning state)
rain->setProbability("True", 0.2); // P(Rain=True) = 0.2
rain->setProbability("False", 0.8); // P(Rain=False) = 0.8
traffic->setProbability("True", 0.4); // P(Traffic | Rain=True) = 0.4
traffic->setProbability("False", 0.6); // P(Traffic | Rain=False) = 0.6
accident->setProbability("True", 0.2); // P(Accident | Traffic=True) = 0.2
accident->setProbability("False", 0.8); // P(Accident | Traffic=False) = 0.8
// Perform inference
double accidentProbability = bn.infer(accident, evidence);
std::cout << "P(Accident | Evidence) = " << accidentProbability << "\n";
}
Output:-
Node: Rain
Parents:
Probabilities:
P(Rain|False) = 0.8
P(Rain|True) = 0.2
Node: Traffic
Parents: Rain
Probabilities:
P(Traffic|False) = 0.6
P(Traffic|True) = 0.4
Node: Accident
Parents: Traffic
Probabilities:
P(Accident|False) = 0.8
P(Accident|True) = 0.2
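The BayesianNetwork::infer routine is only partially visible in the listing above. For this small Rain -> Traffic -> Accident chain, exact inference reduces to summing over the states of the hidden parents. A minimal sketch, assuming each probabilities entry stores P(child = True | parent = key) and, for the root node, P(Rain = key) (the original routine and its evidence handling may differ), could look like:

// Sketch: marginalise out Rain and Traffic to obtain P(Accident = True)
double inferAccidentTrue(Node* rain, Node* traffic, Node* accident) {
    const std::string states[] = {"True", "False"};
    double total = 0.0;
    for (const std::string& r : states) {
        for (const std::string& t : states) {
            double pRain     = rain->probabilities[r];                      // P(Rain = r)
            double pTrafficT = traffic->probabilities[r];                   // P(Traffic = True | Rain = r)
            double pTraffic  = (t == "True") ? pTrafficT : 1.0 - pTrafficT; // P(Traffic = t | Rain = r)
            double pAccident = accident->probabilities[t];                  // P(Accident = True | Traffic = t)
            total += pRain * pTraffic * pAccident;
        }
    }
    return total;
}

With the numbers set above, this works out to 0.2*(0.4*0.2 + 0.6*0.8) + 0.8*(0.6*0.2 + 0.4*0.8), roughly 0.46, under that reading of the tables.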
PRACTICAL:-05
Aim:- To implement the value iteration algorithm for a grid-world Markov Decision Process (MDP) in C++.
#include <iostream>
#include <vector>
#include <iomanip>
#include <cmath>

const int GRID_SIZE = 4;               // grid dimension (assumed value; the original constant is not reproduced in this extract)
enum Action { UP, DOWN, LEFT, RIGHT }; // moves available from each cell

struct State {
int x, y; // Position in the grid
double value; // Value of the state
Action bestAction; // Best action to take from this state
};
class GridWorld {
public:
GridWorld() {
// Initialize the grid
for (int i = 0; i < GRID_SIZE; ++i) {
for (int j = 0; j < GRID_SIZE; ++j) {
states.push_back({i, j, 0.0, UP});
}
}
// Set the goal state
goalState = &states[GRID_SIZE * (GRID_SIZE - 1) + (GRID_SIZE - 1)];
}
void valueIteration() {
bool converged = false;
while (!converged) {
converged = true;
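            // Sketch of the backup sweep (assumed form; the original loop body is not
            // reproduced in this extract). Bellman update for every non-goal state:
            //   V(s) <- max_a [ R(s,a) + gamma * V(s') ]
            for (State& s : states) {
                if (&s == goalState) continue; // the goal state's value stays fixed
                double bestValue = -1e9;
                for (Action a : {UP, DOWN, LEFT, RIGHT}) {
                    // Move one cell; bumping into a wall leaves the agent where it is
                    int nx = s.x + (a == DOWN) - (a == UP);
                    int ny = s.y + (a == RIGHT) - (a == LEFT);
                    if (nx < 0 || nx >= GRID_SIZE || ny < 0 || ny >= GRID_SIZE) { nx = s.x; ny = s.y; }
                    const State& next = states[nx * GRID_SIZE + ny];
                    double reward = (&next == goalState) ? 1.0 : -0.04; // assumed reward scheme
                    double value  = reward + 0.9 * next.value;          // assumed discount factor 0.9
                    if (value > bestValue) { bestValue = value; s.bestAction = a; }
                }
                // Keep iterating until no state value changes appreciably
                if (std::fabs(bestValue - s.value) > 1e-4) converged = false;
                s.value = bestValue;
            }
        }
    }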
void printValues() {
for (int i = 0; i < GRID_SIZE; ++i) {
for (int j = 0; j < GRID_SIZE; ++j) {
std::cout << std::fixed << std::setprecision(2) << states[i * GRID_SIZE + j].value
<< "\t";
}
std::cout << std::endl;
}
}
private:
std::vector<State> states;
    State* goalState;
};

int main() {
GridWorld grid;
grid.valueIteration();
std::cout << "State Values after Value Iteration:\n";
grid.printValues();
return 0;
}
Output:-
PRACTICAL:-06
Aim:- To implement the Q-learning algorithm for an agent learning to reach a goal in a grid-world environment in C++.
#include <iostream>
#include <vector>
#include <cstdlib>
#include <ctime>
#include <iomanip>
#include <algorithm>

const int GRID_SIZE = 4;               // grid dimension (matches the 4x4 Q-value table in the output)
const int EPISODES = 1000;             // number of training episodes (assumed value; not given in this extract)
enum Action { UP, DOWN, LEFT, RIGHT }; // moves available from each state

struct State {
int x, y; // Position in the grid
};
class QLearning {
public:
QLearning() {
// Initialize Q-values to zero
qValues.resize(GRID_SIZE, std::vector<std::vector<double>>(GRID_SIZE,
std::vector<double>(4, 0.0)));
srand(static_cast<unsigned int>(time(0))); // Seed for random number generation
}
void run() {
for (int episode = 0; episode < EPISODES; ++episode) {
State state = {0, 0}; // Start at the top-left corner
while (!isGoal(state)) {
Action action = chooseAction(state);
State nextState = takeAction(state, action);
double reward = getReward(nextState);
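                // Sketch of the remainder of the episode loop (assumed form; the
                // original lines are not reproduced in this extract):
                updateQValue(state, action, reward, nextState); // back up Q(s, a) from the observed transition
                state = nextState;                              // continue the episode from the new state
            }
        }
    }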
void printQValues() {
for (int i = 0; i < GRID_SIZE; ++i) {
for (int j = 0; j < GRID_SIZE; ++j) {
std::cout << std::fixed << std::setprecision(2);
std::cout << "[" << qValues[i][j][UP] << ", " << qValues[i][j][DOWN] << ", "
<< qValues[i][j][LEFT] << ", " << qValues[i][j][RIGHT] << "]\t";
}
std::cout << std::endl;
}
}
private:
std::vector<std::vector<std::vector<double>>> qValues; // Q-values for each state and action
    // Choose an action for 'state': pick the highest-valued action, breaking ties at random
    // (an epsilon-greedy exploration step is assumed here; the original line is not reproduced)
    Action chooseAction(const State& state) {
        if (rand() % 10 == 0) return static_cast<Action>(rand() % 4); // assumed 10% random exploration
        double maxQValue = *std::max_element(qValues[state.x][state.y].begin(),
                                             qValues[state.x][state.y].end());
        // Get all actions with max Q-value (avoid bias in case of ties)
        std::vector<Action> bestActions;
        for (int action = 0; action < 4; ++action) {
            if (qValues[state.x][state.y][action] == maxQValue) {
                bestActions.push_back(static_cast<Action>(action));
INDO GLOBAL COLLEGE OF ENGINEERING
ARTIFICIAL INTELLIGENCE PRACTICAL FILE
            }
        }
        // Break ties uniformly at random among the best actions
        return bestActions[rand() % bestActions.size()];
    }

    void updateQValue(const State& state, Action action, double reward, const State& nextState) {
// Get the max Q-value for the next state
double maxNextQValue = *std::max_element(qValues[nextState.x][nextState.y].begin(),
qValues[nextState.x][nextState.y].end());
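        // Sketch of the rest of this routine and of the helpers used by run();
        // these are assumed forms, the original definitions are not reproduced here.
        // Q-learning backup: Q(s,a) += alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a))
        qValues[state.x][state.y][action] +=
            0.1 * (reward + 0.9 * maxNextQValue - qValues[state.x][state.y][action]); // assumed alpha = 0.1, gamma = 0.9
    }

    // The goal is the bottom-right cell of the grid
    bool isGoal(const State& state) const {
        return state.x == GRID_SIZE - 1 && state.y == GRID_SIZE - 1;
    }

    // Apply an action; a move that would leave the grid keeps the agent in place
    State takeAction(const State& state, Action action) const {
        State next = state;
        if (action == UP)    next.x = std::max(0, state.x - 1);
        if (action == DOWN)  next.x = std::min(GRID_SIZE - 1, state.x + 1);
        if (action == LEFT)  next.y = std::max(0, state.y - 1);
        if (action == RIGHT) next.y = std::min(GRID_SIZE - 1, state.y + 1);
        return next;
    }

    // Assumed reward scheme: +1 for reaching the goal, a small step penalty otherwise
    double getReward(const State& state) const {
        return isGoal(state) ? 1.0 : -0.01;
    }
};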
int main() {
QLearning qLearning;
qLearning.run();
std::cout << "Q-values after training:\n";
qLearning.printQValues();
return 0;
}
Output:-
Q-values after training:
[0.05, 0.12, 0.04, 0.18]   [0.14, 0.31, 0.05, 0.26]   [0.00, 0.46, -0.04, 0.05]   [-0.02, 0.60, -0.03, 0.00]
[-0.13, -0.13, -0.10, 0.31]   [0.15, 0.29, 0.11, 0.46]   [0.28, 0.56, 0.29, 0.62]   [0.36, 0.80, 0.40, 0.58]
[-0.08, -0.07, -0.07, 0.01]   [0.03, 0.54, -0.04, -0.03]   [0.08, 0.14, 0.07, 0.80]   [0.59, 1.00, 0.56, 0.73]
[-0.03, -0.03, -0.03, 0.10]   [0.01, 0.01, -0.02, 0.78]   [-0.02, 0.13, 0.05, 1.00]   [0.00, 0.00, 0.00, 0.00]