"""Experiment 10: Real-time hand gesture and human pose detection.

Captures frames from the default webcam, runs MediaPipe's Hands and Pose
solutions on each frame, draws the detected landmarks, and displays the
annotated video until the user presses 'q'.
"""
import cv2
import mediapipe as mp

print(cv2.__version__)

# Initialize MediaPipe hands and pose solutions
mp_hands = mp.solutions.hands
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

# Initialize video capture (device 0 = default webcam)
cap = cv2.VideoCapture(0)

# Both solution objects are context managers; closing them releases the
# underlying MediaPipe graph resources.
with mp_hands.Hands(min_detection_confidence=0.5,
                    min_tracking_confidence=0.5) as hands, \
     mp_pose.Pose(min_detection_confidence=0.5,
                  min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Camera disconnected or stream ended.
            break

        # Flip the frame horizontally for a more intuitive mirror-like view
        frame = cv2.flip(frame, 1)

        # MediaPipe expects RGB input; OpenCV captures BGR
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Process the hand and pose detections
        hand_results = hands.process(frame_rgb)
        pose_results = pose.process(frame_rgb)

        # Draw hand landmarks (one set per detected hand)
        if hand_results.multi_hand_landmarks:
            for hand_landmarks in hand_results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    frame, hand_landmarks, mp_hands.HAND_CONNECTIONS,
                    mp_drawing.DrawingSpec(color=(0, 255, 0), thickness=2,
                                           circle_radius=2),
                    mp_drawing.DrawingSpec(color=(0, 0, 255), thickness=2,
                                           circle_radius=2))

        # Draw pose landmarks (single skeleton, if detected)
        if pose_results.pose_landmarks:
            mp_drawing.draw_landmarks(
                frame, pose_results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                mp_drawing.DrawingSpec(color=(255, 0, 0), thickness=2,
                                       circle_radius=2),
                mp_drawing.DrawingSpec(color=(0, 255, 255), thickness=2,
                                       circle_radius=2))

        # Display the annotated frame
        cv2.imshow('Hand Gesture and Human Pose Detection', frame)

        # Break the loop on pressing 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Release resources
cap.release()
cv2.destroyAllWindows()