import cv2
import face_recognition
import pyttsx3
import tkinter as tk
from tkinter import messagebox
from PIL import Image, ImageTk
import threading
# Initialize the text-to-speech engine
engine = pyttsx3.init()
# Load known faces and their names
known_faces = []
known_names = []
# Function to add a new face to the system
def add_face(image_path, name):
    image = face_recognition.load_image_file(image_path)
    # Assumes the image contains at least one detectable face; [0] takes the first one
    face_encoding = face_recognition.face_encodings(image)[0]
    known_faces.append(face_encoding)
    known_names.append(name)
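# A more defensive variant (a sketch, not part of the original script): skip images in
# which face_recognition finds no face instead of letting the [0] index raise IndexError.
# "add_face_checked" is a hypothetical name introduced here for illustration only.
def add_face_checked(image_path, name):
    image = face_recognition.load_image_file(image_path)
    encodings = face_recognition.face_encodings(image)
    if not encodings:
        print(f"No face found in {image_path}; skipping.")
        return False
    known_faces.append(encodings[0])
    known_names.append(name)
    return True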
# Add known faces (you can replace with actual file paths)
add_face("path_to_person1_image.jpg", "Person 1")
add_face("path_to_person2_image.jpg", "Person 2")
# Initialize the Tkinter window
root = tk.Tk()
root.title("Face Recognition with Speech")
# Video capture object
video_cap = cv2.VideoCapture(0)  # 0 selects the default camera
is_running = False
# Create a label to display the detected name
name_label = tk.Label(root, text="Detected Name: Unknown", font=("Helvetica", 16))
name_label.pack(pady=10)
# Create a label to display the webcam feed
video_label = tk.Label(root)
video_label.pack()
# Function to start the webcam and detect faces
def start_video():
    global is_running
    is_running = True
    # Run the capture loop in a daemon thread so it does not block the Tkinter main loop
    threading.Thread(target=video_loop, daemon=True).start()
# Function to stop the webcam
def stop_video():
    global is_running
    is_running = False
# Function to process video feed and detect faces
def video_loop():
    while is_running:
        ret, video_data = video_cap.read()
        if not ret:
            print("Failed to grab frame.")
            break
        # Convert the frame to RGB (face_recognition works with RGB, not BGR)
        rgb_frame = cv2.cvtColor(video_data, cv2.COLOR_BGR2RGB)
        # Find all face locations and face encodings in the current frame
        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
        name = "Unknown"
        for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
            # Check if the face matches any known face (default tolerance is 0.6; lower is stricter)
            matches = face_recognition.compare_faces(known_faces, face_encoding)
            if True in matches:
                first_match_index = matches.index(True)
                name = known_names[first_match_index]
            # Draw a rectangle around the face and label it with the name
            cv2.rectangle(video_data, (left, top), (right, bottom), (0, 255, 0), 2)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(video_data, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)
        # Update the GUI with the detected name
        name_label.config(text=f"Detected Name: {name}")
        # Speak the name of the detected person
        # Note: runAndWait() blocks this loop until speech finishes, so the frame rate drops while speaking
        engine.say(name)
        engine.runAndWait()
        # Convert the frame to a format that Tkinter can handle
        video_data = cv2.cvtColor(video_data, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(video_data)
        img = img.resize((640, 480))  # Resize for display
        img_tk = ImageTk.PhotoImage(img)
        # Update the video feed display
        video_label.img_tk = img_tk  # Keep a reference to avoid garbage collection
        video_label.config(image=img_tk)
        # Update the GUI regularly
        root.update_idletasks()
        root.update()
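# Note (not in the original): video_loop() updates Tkinter widgets from a worker thread,
# which Tkinter does not guarantee to be thread-safe. A common alternative, sketched here
# under the assumption that processing one frame per timer tick is acceptable, is to drive
# the loop from the Tk main loop with root.after(); "process_frame" is a hypothetical
# helper that would grab and handle a single frame per call.
#
# def poll_frame():
#     if is_running:
#         process_frame()            # hypothetical: read one frame, detect, and display it
#     root.after(30, poll_frame)     # re-schedule roughly every 30 ms on the main loop
#
# root.after(30, poll_frame)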
# Create buttons to start and stop the webcam feed
start_button = tk.Button(root, text="Start Video", font=("Helvetica", 14), command=start_video)
start_button.pack(side=tk.LEFT, padx=10, pady=20)
stop_button = tk.Button(root, text="Stop Video", font=("Helvetica", 14), command=stop_video)
stop_button.pack(side=tk.RIGHT, padx=10, pady=20)
# Function to add a known face (for GUI usage)
def add_known_face():
    def submit_face():
        image_path = entry_image_path.get()
        name = entry_name.get()
        try:
            add_face(image_path, name)
            messagebox.showinfo("Success", f"Added {name} to the system!")
        except Exception as e:
            messagebox.showerror("Error", f"Error adding face: {str(e)}")

    add_face_window = tk.Toplevel(root)
    add_face_window.title("Add Known Face")
    tk.Label(add_face_window, text="Image Path:").pack(pady=5)
    entry_image_path = tk.Entry(add_face_window, width=40)
    entry_image_path.pack(pady=5)
    tk.Label(add_face_window, text="Name:").pack(pady=5)
    entry_name = tk.Entry(add_face_window, width=40)
    entry_name.pack(pady=5)
    submit_button = tk.Button(add_face_window, text="Submit", command=submit_face)
    submit_button.pack(pady=10)
# Button to add a new known face
add_face_button = tk.Button(root, text="Add Known Face", font=("Helvetica", 14), command=add_known_face)
add_face_button.pack(pady=20)
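# Optional (not part of the original script): stop the capture loop when the window is
# closed so the worker thread stops touching destroyed widgets. WM_DELETE_WINDOW is the
# standard Tk protocol name for the window-manager close button.
def on_close():
    global is_running
    is_running = False
    root.destroy()

root.protocol("WM_DELETE_WINDOW", on_close)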
# Start the Tkinter main loop
root.mainloop()
# Release the video capture object after the GUI window is closed
video_cap.release()
cv2.destroyAllWindows()