import cv2, mediapipe as mp, json, time, math, numpy as np
from multiprocessing import Queue
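
# Assumed dependencies (PyPI package names):
#   pip install opencv-python mediapipe numpy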

# --------------- Helper function: map the projection surface onto the screen ---------------
def map_to_screen(x, y, calib_points, screen_size=(800, 600)):
    # Build a homography from the four calibrated corner points to the
    # corners of the target screen rectangle, then map the point through it.
    pts_src = np.array(calib_points, dtype=np.float32)
    pts_dst = np.array([[0, 0], [screen_size[0], 0],
                        [screen_size[0], screen_size[1]], [0, screen_size[1]]],
                       dtype=np.float32)
    M = cv2.getPerspectiveTransform(pts_src, pts_dst)
    p = np.array([[[x, y]]], dtype=np.float32)
    mapped = cv2.perspectiveTransform(p, M)[0][0]
    return int(mapped[0]), int(mapped[1])
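
# Example with hypothetical calibration values: the four corners of the
# projection surface as seen by the touch camera, in the same order as
# pts_dst above (top-left, top-right, bottom-right, bottom-left).
#
#   calib = [[120, 80], [520, 90], [530, 410], [110, 400]]
#   map_to_screen(320, 245, calib)  # ~ centre of the 800x600 screen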

# --------------- Main function ---------------
def run_gesture_input(queue: Queue,
                      touch_cam_index=0,
                      gesture_cam_index=1,
                      screen_size=(800, 600)):

    mp_hands = mp.solutions.hands
    mp_draw = mp.solutions.drawing_utils
    # Two separate detectors: the touch camera only ever needs one hand, the
    # gesture camera tracks both hands for clap detection.
    hands_touch = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.6)
    hands_gesture = mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.6)
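
    # calibration.json is assumed to hold the four corners of the projection
    # surface in touch-camera pixel coordinates, as written by
    # calibrate_touch.py, in the corner order map_to_screen expects
    # (top-left, top-right, bottom-right, bottom-left).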
    # Load calibration
    try:
        with open("calibration.json") as f:
            calib_points = json.load(f)
        print("📄 Calibration loaded:", calib_points)
    except (FileNotFoundError, json.JSONDecodeError):
        print("⚠️ No calibration.json – run calibrate_touch.py first!")
        return

    cam_touch = cv2.VideoCapture(touch_cam_index)
    cam_gesture = cv2.VideoCapture(gesture_cam_index)

    prev_clap_time = 0
    clap_cooldown = 1.5  # seconds; debounce between accepted claps

    while True:
        ok1, frame_touch = cam_touch.read()
        ok2, frame_gest = cam_gesture.read()
        if not ok1 or not ok2:
            print("⚠️ Camera not available")
            break

        # Mirror both frames so movement on screen matches the user's view.
        frame_touch = cv2.flip(frame_touch, 1)
        frame_gest = cv2.flip(frame_gest, 1)

        # ---------- Detect touch ----------
        rgb_t = cv2.cvtColor(frame_touch, cv2.COLOR_BGR2RGB)
        res_t = hands_touch.process(rgb_t)
        h, w, _ = frame_touch.shape
        if res_t.multi_hand_landmarks:
            lm = res_t.multi_hand_landmarks[0]
            mp_draw.draw_landmarks(frame_touch, lm, mp_hands.HAND_CONNECTIONS)
            # Landmark 8 is the tip of the index finger.
            fx = int(lm.landmark[8].x * w)
            fy = int(lm.landmark[8].y * h)
            sx, sy = map_to_screen(fx, fy, calib_points, screen_size)
            # A fingertip in the lower part of the frame counts as surface contact.
            if lm.landmark[8].y > 0.8:
                queue.put(("touch", (sx, sy)))
            cv2.putText(frame_touch, f"Touch ({sx},{sy})", (40, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        # ---------- Clap / movement ----------
        rgb_g = cv2.cvtColor(frame_gest, cv2.COLOR_BGR2RGB)
        res_g = hands_gesture.process(rgb_g)
        gh, gw, _ = frame_gest.shape
        if res_g.multi_hand_landmarks and len(res_g.multi_hand_landmarks) == 2:
            h1, h2 = res_g.multi_hand_landmarks
            # Centroid of each hand's landmarks in pixel coordinates.
            x1 = np.mean([p.x for p in h1.landmark]) * gw
            y1 = np.mean([p.y for p in h1.landmark]) * gh
            x2 = np.mean([p.x for p in h2.landmark]) * gw
            y2 = np.mean([p.y for p in h2.landmark]) * gh
            dist = math.hypot(x2 - x1, y2 - y1)
            # Hands close together = clap; threshold in pixels, tune per setup.
            if dist < 100 and (time.time() - prev_clap_time) > clap_cooldown:
                queue.put(("clap", None))
                prev_clap_time = time.time()
                # OpenCV's Hershey fonts are ASCII-only, so use plain text
                # for the clap overlay.
                cv2.putText(frame_gest, "CLAP!", (int(gw / 2) - 20, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 3)

        # Display
        cv2.imshow("Touch-Cam", frame_touch)
        cv2.imshow("Gesture-Cam", frame_gest)
        if cv2.waitKey(5) & 0xFF == 27:  # ESC quits
            break

    cam_touch.release()
    cam_gesture.release()
    cv2.destroyAllWindows()
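
# A minimal, hypothetical launcher sketch: runs run_gesture_input in a child
# process and drains its events. The queue carries ("touch", (x, y)) and
# ("clap", None) tuples; the print-based consumer below stands in for a real
# display process.
if __name__ == "__main__":
    from multiprocessing import Process

    q = Queue()
    p = Process(target=run_gesture_input, args=(q,), daemon=True)
    p.start()
    try:
        while True:
            kind, payload = q.get()  # blocks until the next event arrives
            if kind == "touch":
                print("Touch at", payload)
            elif kind == "clap":
                print("Clap detected")
    except KeyboardInterrupt:
        p.terminate()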