import cv2
import mediapipe as mp
import time
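# Minimal hand-tracking module: wraps MediaPipe Hands in a small HandDetector
# class that draws detected landmarks and returns their pixel coordinates.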
class HandDetector():
    def __init__(self, mode=False, max_num_hands=2,
                 min_detection_confidence=0.5, min_tracking_confidence=0.5):
        self.mode = mode
        self.maxHands = max_num_hands
        self.minDetectionConfidence = min_detection_confidence
        self.minTrackingConfidence = min_tracking_confidence
        # MediaPipe Hands solution and its drawing helper
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(
            static_image_mode=self.mode,
            max_num_hands=self.maxHands,
            min_detection_confidence=self.minDetectionConfidence,
            min_tracking_confidence=self.minTrackingConfidence)
        self.mpDraw = mp.solutions.drawing_utils
    def findHands(self, img, draw=True):
        # Run the detector on an RGB copy of the frame and optionally
        # draw the landmarks and connections onto the original image
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img
    def findPosition(self, img, handNo=0, draw=True):
        # Return a list of [id, x_px, y_px] for every landmark of the
        # requested hand; empty if no hand was detected
        lmlist = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                h, w, c = img.shape
                # Landmark coordinates are normalised; convert to pixels
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmlist.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 30, (255, 255, 255), cv2.FILLED)
        return lmlist
def main():
    pTime = 0
    cap = cv2.VideoCapture(0)
    detector = HandDetector()
    while True:
        success, img = cap.read()
        if not success:
            break
        img = detector.findHands(img)
        lmlist = detector.findPosition(img)
        if len(lmlist) != 0:
            # Landmark 4 is the tip of the thumb
            print(lmlist[4])
        # Overlay the frames-per-second counter
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0), 2)
        cv2.imshow('Video', img)
        cv2.waitKey(1)
if __name__ == "__main__":
    main()