IT WORKS 11!!!!!!!!!!!!!!!!!

It works, plus renames
Kristoph Laemmerzahl 2025-12-03 16:08:36 +01:00
parent 316a2be7f0
commit 953a0d294e
11 changed files with 955 additions and 519 deletions


@@ -4,7 +4,7 @@
   <content url="file://$MODULE_DIR$">
     <excludeFolder url="file://$MODULE_DIR$/.venv" />
   </content>
-  <orderEntry type="inheritedJdk" />
+  <orderEntry type="jdk" jdkName="Python 3.12 (pythonProject)" jdkType="Python SDK" />
   <orderEntry type="sourceFolder" forTests="false" />
 </component>
</module>

.idea/misc.xml (generated)

@@ -3,5 +3,5 @@
   <component name="Black">
     <option name="sdkName" value="Python 3.12 (Memory GlobalMatch)" />
   </component>
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (Memory GlobalMatch)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (pythonProject)" project-jdk-type="Python SDK" />
 </project>


@@ -2,7 +2,7 @@ import cv2
 import json
 import numpy as np
-CAM_INDEX = 0
+CAM_INDEX = 1
 OUTPUT_FILE = "calibration.json"
 # Order: P1=top left, P2=top right, P3=bottom right, P4=bottom left
@@ -75,7 +75,7 @@ def main():
         ok, frame = cap.read()
         if not ok:
             break
-        frame = cv2.flip(frame, 1)
+        frame = cv2.flip(frame, -1)  # <-- flip both axes; old: frame = cv2.flip(frame, 1)
         h, w, _ = frame.shape
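For context on the flip change above: OpenCV's cv2.flip uses flipCode 0 for a flip around the x-axis (upside down), 1 for a horizontal mirror, and -1 for both axes, i.e. a 180° rotation, so the calibration camera is now rotated rather than mirrored. A quick interpreter check:

import cv2
import numpy as np

# flipCode semantics: 0 = around x-axis, 1 = around y-axis (mirror), -1 = both (180°)
frame = np.arange(6, dtype=np.uint8).reshape(2, 3)   # [[0 1 2], [3 4 5]]
print(cv2.flip(frame, 1))    # [[2 1 0], [5 4 3]] -> columns reversed
print(cv2.flip(frame, -1))   # [[5 4 3], [2 1 0]] -> rows and columns reversed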


@@ -1 +1 @@
-[[164, 384], [370, 425], [444, 487], [447, 393]]
+[[24, 118], [630, 135], [627, 459], [36, 461]]


@@ -1,23 +1,21 @@
 import cv2
 import mediapipe as mp
 import numpy as np
-import math, time
+import math, time, json
 from pythonosc import udp_client
 # -------------------------------
 # SETTINGS
 # -------------------------------
-TOUCH_CAM_INDEX = 0    # your touch camera
-GESTURE_CAM_INDEX = 2  # your clap / gesture camera
-GAME_SCREEN_WIDTH = 900  # must match your Pygame window!
+TOUCH_CAM_INDEX = 1    # your touch camera
+GESTURE_CAM_INDEX = 0  # clap/gesture camera
+GAME_SCREEN_WIDTH = 900
 GAME_SCREEN_HEIGHT = 600
-# How "strict" is the touch?
-STILL_REQUIRED = 1.0   # seconds the finger must stay almost still
-MOVE_TOLERANCE = 25    # maximum allowed movement (pixels)
-# OSC client → sends to the game
+STILL_REQUIRED = 1.0   # seconds the finger must hold still
+MOVE_TOLERANCE = 25    # movement threshold (pixels)
 client = udp_client.SimpleUDPClient("127.0.0.1", 5005)
 # Global state
@@ -26,6 +24,46 @@ finger_still_start = None
 prev_touch_time = 0.0
 prev_clap_time = 0.0
+# -------------------------------------
+# LOAD CALIBRATION + HOMOGRAPHY
+# -------------------------------------
+try:
+    with open("calibration.json", "r") as f:
+        CALIB_POINTS = json.load(f)
+    print("📐 Calibration loaded:", CALIB_POINTS)
+except:
+    CALIB_POINTS = None
+    print("⚠️ No calibration found, using raw coordinates!")
+H = None
+if CALIB_POINTS is not None:
+    src = np.array(CALIB_POINTS, dtype=np.float32)
+    dst = np.array([
+        [0, 0],
+        [GAME_SCREEN_WIDTH, 0],
+        [GAME_SCREEN_WIDTH, GAME_SCREEN_HEIGHT],
+        [0, GAME_SCREEN_HEIGHT]
+    ], dtype=np.float32)
+    H, _ = cv2.findHomography(src, dst)
+    print("📐 Homography matrix computed!")
+def map_point_homography(x, y):
+    """Converts camera coordinates → screen coordinates."""
+    global H
+    if H is None:
+        # fallback: NO scaling (variant 1 means pure homography)
+        return int(x), int(y)
+    p = np.array([[[x, y]]], dtype=np.float32)
+    mapped = cv2.perspectiveTransform(p, H)[0][0]
+    return int(mapped[0]), int(mapped[1])
+# -----------------------------------------------------------------
 def run_gesture_input():
     global last_finger_pos, finger_still_start
@@ -37,17 +75,16 @@ def run_gesture_input():
     hands_touch = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.6)
     hands_gesture = mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.6)
-    # open the cameras
-    cam_touch = cv2.VideoCapture(TOUCH_CAM_INDEX)
+    cam_touch = cv2.VideoCapture(TOUCH_CAM_INDEX)  # <-- flip; old: frame_touch = cv2.flip(frame_touch, 1)
     cam_gesture = cv2.VideoCapture(GESTURE_CAM_INDEX)
     if not cam_touch.isOpened():
         print("Touch camera could NOT be opened!")
     else:
         print(f"Touch camera opened (index {TOUCH_CAM_INDEX})")
     if not cam_gesture.isOpened():
         print("Gesture camera could NOT be opened!")
     else:
         print(f"Gesture camera opened (index {GESTURE_CAM_INDEX})")
@@ -58,24 +95,24 @@ def run_gesture_input():
         ok2, frame_gest = cam_gesture.read()
         if not ok1 or not ok2:
             print("One camera is not delivering frames.")
             break
-        frame_touch = cv2.flip(frame_touch, 1)
+        frame_touch = cv2.flip(frame_touch, -1)
         frame_gest = cv2.flip(frame_gest, 1)
-        # ---------------------------------------
-        # TOUCH (index finger) with stillness check
-        # ---------------------------------------
         rgb_t = cv2.cvtColor(frame_touch, cv2.COLOR_BGR2RGB)
         res_t = hands_touch.process(rgb_t)
-        th, tw, _ = frame_touch.shape  # h = height, w = width
+        th, tw, _ = frame_touch.shape
+        # -------------------------------------------------------------
+        # TOUCH detection
+        # -------------------------------------------------------------
         if res_t.multi_hand_landmarks:
             lm = res_t.multi_hand_landmarks[0]
             mp_draw.draw_landmarks(frame_touch, lm, mp_hands.HAND_CONNECTIONS)
-            # finger must point DOWN (8 lower than 5)
+            # finger points down: landmark 8 lower than 5
             if lm.landmark[8].y < lm.landmark[5].y:
                 last_finger_pos = None
                 finger_still_start = None
@@ -84,15 +121,14 @@ def run_gesture_input():
             fx = int(lm.landmark[8].x * tw)
             fy = int(lm.landmark[8].y * th)
-            sx = int(fx * (GAME_SCREEN_WIDTH / tw))
-            sy = int(fy * (GAME_SCREEN_HEIGHT / th))
+            # → apply the homography
+            sx, sy = map_point_homography(fx, fy)
             now = time.time()
             current_pos = (fx, fy)
             # first measurement point
             if last_finger_pos is None:
-                # first point
                 last_finger_pos = current_pos
                 finger_still_start = now
             else:
@@ -100,66 +136,31 @@ def run_gesture_input():
                                   current_pos[1] - last_finger_pos[1])
                 if dist < MOVE_TOLERANCE:
-                    # finger is "still"
                     if finger_still_start is None:
                         finger_still_start = now
                     else:
                         still_time = now - finger_still_start
                         if still_time >= STILL_REQUIRED and (now - prev_touch_time) > 0.5:
                             client.send_message("/touch", [sx, sy])
-                            print(f"👉 STABLE TOUCH at {sx},{sy} after {still_time:.2f}s")
+                            print(f"👉 TOUCH at {sx},{sy} after {still_time:.2f}s")
                             prev_touch_time = now
                             finger_still_start = None
                 else:
                     finger_still_start = now
-            # ALWAYS update
             last_finger_pos = current_pos
-            # visualize the finger
             cv2.circle(frame_touch, (fx, fy), 10, (0, 255, 0), -1)
             cv2.putText(frame_touch, f"{sx},{sy}", (fx + 10, fy - 10),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,255,0), 2)
-            now = time.time()
-            current_pos = (fx, fy)
-            if last_finger_pos is None:
-                # first point
-                last_finger_pos = current_pos
-                finger_still_start = now
         else:
-                dist = math.hypot(current_pos[0] - last_finger_pos[0],
-                                  current_pos[1] - last_finger_pos[1])
-                if dist < MOVE_TOLERANCE:
-                    # finger is "still"
-                    if finger_still_start is None:
-                        finger_still_start = now
-                    else:
-                        still_time = now - finger_still_start
-                        if still_time >= STILL_REQUIRED and (now - prev_touch_time) > 0.5:
-                            # NOW: stable touch → send exactly 1 click
-                            client.send_message("/touch", [sx, sy])
-                            print(f"STABLE TOUCH at {sx},{sy} after {still_time:.2f}s")
-                            print("SCREEN COORD:", sx, sy)
-                            prev_touch_time = now
-                            # reset, so the next touch requires new movement first
-                            finger_still_start = None
-                else:
-                    # finger moved significantly → restart the timer
-                    finger_still_start = now
-            last_finger_pos = current_pos
-        else:
-            # no hand → reset
             last_finger_pos = None
             finger_still_start = None
-        # ---------------------------------------
-        # CLAP (two hands)
-        # ---------------------------------------
+        # -------------------------------------------------------------
+        # GESTURE detection (clap)
+        # -------------------------------------------------------------
         rgb_g = cv2.cvtColor(frame_gest, cv2.COLOR_BGR2RGB)
         res_g = hands_gesture.process(rgb_g)
         gh, gw, _ = frame_gest.shape
@@ -179,7 +180,7 @@ def run_gesture_input():
                 client.send_message("/clap", 1)
                 print("👏 SEND /clap")
                 cv2.putText(frame_gest, "👏", (int(gw/2)-20, 80),
-                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 3)
+                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0,255,255), 3)
         cv2.imshow("Touch-Cam", frame_touch)
         cv2.imshow("Gesture-Cam", frame_gest)


@@ -0,0 +1,120 @@
import cv2
import json
import numpy as np

CAM_INDEX = 1
OUTPUT_FILE = "calibration.json"

# Order: P1=top left, P2=top right, P3=bottom right, P4=bottom left
POINT_NAMES = ["P1 (top left)", "P2 (top right)", "P3 (bottom right)", "P4 (bottom left)"]

def dist(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))

def main():
    cap = cv2.VideoCapture(CAM_INDEX)
    if not cap.isOpened():
        print("❌ Camera could not be opened!")
        return
    print("📸 Calibration started (mouse only)")
    print("----------------------------------")
    print("🖱 Left click = place point")
    print("🖱 Drag = move point")
    print("🖱 Right click = delete point")
    print("💾 S / Enter = save")
    print("❌ ESC = cancel")
    print("----------------------------------")
    print("Please place the points in this order:")
    for i, name in enumerate(POINT_NAMES):
        print(f"  {i+1}. {name}")

    points = [None, None, None, None]
    dragging_index = None

    def mouse_callback(event, mx, my, flags, param):
        nonlocal dragging_index, points
        # left click → place a point or start dragging one
        if event == cv2.EVENT_LBUTTONDOWN:
            # check whether the click hit an existing point
            for i, p in enumerate(points):
                if p is not None and dist(p, (mx, my)) < 20:
                    dragging_index = i
                    return
            # place a new point
            for i in range(4):
                if points[i] is None:
                    points[i] = (mx, my)
                    print(f"{POINT_NAMES[i]} placed at {points[i]}")
                    return
        # dragging
        elif event == cv2.EVENT_MOUSEMOVE and dragging_index is not None:
            points[dragging_index] = (mx, my)
        # release
        elif event == cv2.EVENT_LBUTTONUP:
            dragging_index = None
        # right click → delete a point
        elif event == cv2.EVENT_RBUTTONDOWN:
            for i, p in enumerate(points):
                if p is not None and dist(p, (mx, my)) < 20:
                    print(f"🗑 {POINT_NAMES[i]} deleted")
                    points[i] = None
                    return

    cv2.namedWindow("Calibration")
    cv2.setMouseCallback("Calibration", mouse_callback)

    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frame = cv2.flip(frame, 1)
        h, w, _ = frame.shape
        # draw the points
        for i, p in enumerate(points):
            if p is not None:
                cv2.circle(frame, p, 10, (0, 255, 255), -1)
                cv2.putText(frame, f"P{i+1}", (p[0] + 10, p[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
        # draw the polygon once all points exist
        if all(points):
            cv2.polylines(frame, [np.array(points, np.int32)], True, (0, 255, 0), 2)
        # hints
        cv2.putText(frame, "Place P1,P2,P3,P4 with the mouse | S=save | ESC=cancel",
                    (10, h - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
        cv2.imshow("Calibration", frame)
        key = cv2.waitKey(10) & 0xFF
        # save
        if key in [ord('s'), 13]:  # 's' or Enter
            if None in points:
                print("⚠️ Not all 4 points have been placed!")
            else:
                with open(OUTPUT_FILE, "w") as f:
                    json.dump(points, f)
                print("💾 Calibration saved:", points)
                break
        if key == 27:  # ESC
            print("❌ Calibration cancelled")
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
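One failure mode the tool does not guard against: if the four points are placed out of order, findHomography still returns a matrix, but the mapping folds over itself. A hypothetical sanity check (not part of this commit) that could run after loading:

import json
import cv2
import numpy as np

with open("calibration.json") as f:
    pts = np.array(json.load(f), dtype=np.int32)

assert pts.shape == (4, 2), "expected exactly 4 corner points"
# a correctly ordered TL→TR→BR→BL quad is convex
if not cv2.isContourConvex(pts.reshape(-1, 1, 2)):
    print("⚠️ calibration points are not convex, likely placed out of order")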

old/gesture_input_osc.py (new file)

@@ -0,0 +1,196 @@
import cv2
import mediapipe as mp
import numpy as np
import math, time
from pythonosc import udp_client

# -------------------------------
# SETTINGS
# -------------------------------
TOUCH_CAM_INDEX = 1      # your touch camera
GESTURE_CAM_INDEX = 0    # your clap / gesture camera
GAME_SCREEN_WIDTH = 900  # must match your Pygame window!
GAME_SCREEN_HEIGHT = 600

# How "strict" is the touch?
STILL_REQUIRED = 1.0   # seconds the finger must stay almost still
MOVE_TOLERANCE = 25    # maximum allowed movement (pixels)

# OSC client → sends to the game
client = udp_client.SimpleUDPClient("127.0.0.1", 5005)

# Global state
last_finger_pos = None
finger_still_start = None
prev_touch_time = 0.0
prev_clap_time = 0.0

def run_gesture_input():
    global last_finger_pos, finger_still_start
    global prev_touch_time, prev_clap_time

    mp_hands = mp.solutions.hands
    mp_draw = mp.solutions.drawing_utils
    hands_touch = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.6)
    hands_gesture = mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.6)

    # open the cameras
    cam_touch = cv2.VideoCapture(TOUCH_CAM_INDEX)
    cam_gesture = cv2.VideoCapture(GESTURE_CAM_INDEX)
    if not cam_touch.isOpened():
        print("Touch camera could NOT be opened!")
    else:
        print(f"Touch camera opened (index {TOUCH_CAM_INDEX})")
    if not cam_gesture.isOpened():
        print("Gesture camera could NOT be opened!")
    else:
        print(f"Gesture camera opened (index {GESTURE_CAM_INDEX})")

    clap_cooldown = 1.5

    while True:
        ok1, frame_touch = cam_touch.read()
        ok2, frame_gest = cam_gesture.read()
        if not ok1 or not ok2:
            print("One camera is not delivering frames.")
            break
        frame_touch = cv2.flip(frame_touch, 1)
        frame_gest = cv2.flip(frame_gest, 1)

        # ---------------------------------------
        # TOUCH (index finger) with stillness check
        # ---------------------------------------
        rgb_t = cv2.cvtColor(frame_touch, cv2.COLOR_BGR2RGB)
        res_t = hands_touch.process(rgb_t)
        th, tw, _ = frame_touch.shape  # h = height, w = width
        if res_t.multi_hand_landmarks:
            lm = res_t.multi_hand_landmarks[0]
            mp_draw.draw_landmarks(frame_touch, lm, mp_hands.HAND_CONNECTIONS)
            # finger must point DOWN (landmark 8 lower than 5)
            if lm.landmark[8].y < lm.landmark[5].y:
                last_finger_pos = None
                finger_still_start = None
                continue
            fx = int(lm.landmark[8].x * tw)
            fy = int(lm.landmark[8].y * th)
            sx = int(fx * (GAME_SCREEN_WIDTH / tw))
            sy = int(fy * (GAME_SCREEN_HEIGHT / th))
            now = time.time()
            current_pos = (fx, fy)
            # first measurement point
            if last_finger_pos is None:
                # first point
                last_finger_pos = current_pos
                finger_still_start = now
            else:
                dist = math.hypot(current_pos[0] - last_finger_pos[0],
                                  current_pos[1] - last_finger_pos[1])
                if dist < MOVE_TOLERANCE:
                    # finger is "still"
                    if finger_still_start is None:
                        finger_still_start = now
                    else:
                        still_time = now - finger_still_start
                        if still_time >= STILL_REQUIRED and (now - prev_touch_time) > 0.5:
                            client.send_message("/touch", [sx, sy])
                            print(f"👉 STABLE TOUCH at {sx},{sy} after {still_time:.2f}s")
                            prev_touch_time = now
                            finger_still_start = None
                else:
                    finger_still_start = now
            # ALWAYS update
            last_finger_pos = current_pos
            # visualize the finger
            cv2.circle(frame_touch, (fx, fy), 10, (0, 255, 0), -1)
            cv2.putText(frame_touch, f"{sx},{sy}", (fx + 10, fy - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            now = time.time()
            current_pos = (fx, fy)
            if last_finger_pos is None:
                # first point
                last_finger_pos = current_pos
                finger_still_start = now
            else:
                dist = math.hypot(current_pos[0] - last_finger_pos[0],
                                  current_pos[1] - last_finger_pos[1])
                if dist < MOVE_TOLERANCE:
                    # finger is "still"
                    if finger_still_start is None:
                        finger_still_start = now
                    else:
                        still_time = now - finger_still_start
                        if still_time >= STILL_REQUIRED and (now - prev_touch_time) > 0.5:
                            # NOW: stable touch → send exactly 1 click
                            client.send_message("/touch", [sx, sy])
                            print(f"STABLE TOUCH at {sx},{sy} after {still_time:.2f}s")
                            print("SCREEN COORD:", sx, sy)
                            prev_touch_time = now
                            # reset, so the next touch requires new movement first
                            finger_still_start = None
                else:
                    # finger moved significantly → restart the timer
                    finger_still_start = now
            last_finger_pos = current_pos
        else:
            # no hand → reset
            last_finger_pos = None
            finger_still_start = None

        # ---------------------------------------
        # CLAP (two hands)
        # ---------------------------------------
        rgb_g = cv2.cvtColor(frame_gest, cv2.COLOR_BGR2RGB)
        res_g = hands_gesture.process(rgb_g)
        gh, gw, _ = frame_gest.shape
        if res_g.multi_hand_landmarks and len(res_g.multi_hand_landmarks) == 2:
            h1, h2 = res_g.multi_hand_landmarks
            x1 = np.mean([p.x for p in h1.landmark]) * gw
            y1 = np.mean([p.y for p in h1.landmark]) * gh
            x2 = np.mean([p.x for p in h2.landmark]) * gw
            y2 = np.mean([p.y for p in h2.landmark]) * gh
            dist = math.hypot(x2 - x1, y2 - y1)
            if dist < 100 and (time.time() - prev_clap_time) > clap_cooldown:
                prev_clap_time = time.time()
                client.send_message("/clap", 1)
                print("👏 SEND /clap")
                cv2.putText(frame_gest, "👏", (int(gw/2)-20, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 3)

        cv2.imshow("Touch-Cam", frame_touch)
        cv2.imshow("Gesture-Cam", frame_gest)
        if cv2.waitKey(5) & 0xFF == 27:
            break

    cam_touch.release()
    cam_gesture.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    run_gesture_input()
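The touch logic this archived file implements (twice, due to the accidentally duplicated block that the diff above removes) is a small debounce state machine: fire once when the fingertip stays within MOVE_TOLERANCE pixels for STILL_REQUIRED seconds, then require movement before the next fire. Factored out as a sketch, with class and method names of our own choosing:

import math, time

class StillnessDetector:
    """Fires once when a point stays within `tolerance` px for `hold` seconds."""
    def __init__(self, hold=1.0, tolerance=25, cooldown=0.5):
        self.hold, self.tolerance, self.cooldown = hold, tolerance, cooldown
        self.last_pos = None        # last observed fingertip position
        self.still_since = None     # when the current still phase began
        self.last_fire = 0.0        # last time a touch was reported

    def update(self, pos):
        now = time.time()
        if pos is None:             # no hand seen -> full reset
            self.last_pos = None
            self.still_since = None
            return False
        fired = False
        if self.last_pos is not None:
            moved = math.hypot(pos[0] - self.last_pos[0], pos[1] - self.last_pos[1])
            if moved < self.tolerance:
                if self.still_since is None:
                    self.still_since = now
                elif (now - self.still_since >= self.hold
                        and now - self.last_fire > self.cooldown):
                    fired = True
                    self.last_fire = now
                    self.still_since = None   # require movement before the next fire
            else:
                self.still_since = now        # moved too much -> restart the timer
        else:
            self.still_since = now
        self.last_pos = pos
        return fired

Usage would mirror the inline version: detector = StillnessDetector(), then inside the frame loop, if detector.update((fx, fy)): client.send_message("/touch", [sx, sy]).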


@@ -0,0 +1,119 @@
import cv2
import mediapipe as mp
import numpy as np
import math, time
from pythonosc import udp_client

# -------------------------------
# SETTINGS
# -------------------------------
TOUCH_CAM_INDEX = 0      # your touch camera / top
GESTURE_CAM_INDEX = 1    # your clap / gesture camera / bottom
GAME_SCREEN_WIDTH = 900  # must match your Pygame window!
GAME_SCREEN_HEIGHT = 600

client = udp_client.SimpleUDPClient("127.0.0.1", 5005)

# -------------------------------
# MAIN FUNCTION
# -------------------------------
def run_gesture_input():
    mp_hands = mp.solutions.hands
    mp_draw = mp.solutions.drawing_utils
    hands_touch = mp_hands.Hands(max_num_hands=1, min_detection_confidence=0.6)
    hands_gesture = mp_hands.Hands(max_num_hands=2, min_detection_confidence=0.6)

    # open the cameras
    cam_touch = cv2.VideoCapture(TOUCH_CAM_INDEX)
    cam_gesture = cv2.VideoCapture(GESTURE_CAM_INDEX)
    if not cam_touch.isOpened():
        print("❌ Touch camera could NOT be opened!")
    else:
        print(f"✅ Touch camera opened (index {TOUCH_CAM_INDEX})")
    if not cam_gesture.isOpened():
        print("❌ Gesture camera could NOT be opened!")
    else:
        print(f"✅ Gesture camera opened (index {GESTURE_CAM_INDEX})")

    prev_clap_time = 0
    clap_cooldown = 1.5

    while True:
        ok1, frame_touch = cam_touch.read()
        ok2, frame_gest = cam_gesture.read()
        if not ok1 or not ok2:
            print("⚠️ One camera is not delivering frames.")
            break
        frame_touch = cv2.flip(frame_touch, 1)
        frame_gest = cv2.flip(frame_gest, 1)

        # ---------------------------------------
        # TOUCH (index finger) without calibration
        # ---------------------------------------
        rgb_t = cv2.cvtColor(frame_touch, cv2.COLOR_BGR2RGB)
        res_t = hands_touch.process(rgb_t)
        th, tw, _ = frame_touch.shape
        if res_t.multi_hand_landmarks:
            lm = res_t.multi_hand_landmarks[0]
            mp_draw.draw_landmarks(frame_touch, lm, mp_hands.HAND_CONNECTIONS)
            fx = int(lm.landmark[8].x * tw)
            fy = int(lm.landmark[8].y * th)
            # simple scaling onto your game window
            sx = int(fx * (GAME_SCREEN_WIDTH / tw))
            sy = int(fy * (GAME_SCREEN_HEIGHT / th))
            # finger near the bottom? (touch)
            if lm.landmark[8].y > 0.8:
                client.send_message("/touch", [sx, sy])
                cv2.putText(frame_touch, f"Touch {sx},{sy}", (40, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # ---------------------------------------
        # CLAP (two hands)
        # ---------------------------------------
        rgb_g = cv2.cvtColor(frame_gest, cv2.COLOR_BGR2RGB)
        res_g = hands_gesture.process(rgb_g)
        gh, gw, _ = frame_gest.shape
        if res_g.multi_hand_landmarks and len(res_g.multi_hand_landmarks) == 2:
            h1, h2 = res_g.multi_hand_landmarks
            x1 = np.mean([p.x for p in h1.landmark]) * gw
            y1 = np.mean([p.y for p in h1.landmark]) * gh
            x2 = np.mean([p.x for p in h2.landmark]) * gw
            y2 = np.mean([p.y for p in h2.landmark]) * gh
            dist = math.hypot(x2 - x1, y2 - y1)
            if dist < 100 and (time.time() - prev_clap_time) > clap_cooldown:
                prev_clap_time = time.time()
                client.send_message("/clap", 1)
                cv2.putText(frame_gest, "👏", (int(gw/2)-20, 80),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 3)

        cv2.imshow("Touch-Cam", frame_touch)
        cv2.imshow("Gesture-Cam", frame_gest)
        if cv2.waitKey(5) & 0xFF == 27:
            break

    cam_touch.release()
    cam_gesture.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    run_gesture_input()
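Every variant of the sender posts OSC to 127.0.0.1:5005, so any of them can be tested against a small receiver along the lines of what test_touch_area.py (diffed below) sets up. A minimal sketch; the handler names are our own:

from pythonosc import dispatcher, osc_server

def on_touch(address, x, y):
    # /touch carries two int args: screen x and y
    print(f"/touch at {x},{y}")

def on_clap(address, value):
    print("/clap received")

d = dispatcher.Dispatcher()
d.map("/touch", on_touch)
d.map("/clap", on_clap)

# blocks forever; run it in a thread (as test_touch_area.py does with `import threading`)
# if a game loop must keep running alongside it
server = osc_server.BlockingOSCUDPServer(("127.0.0.1", 5005), d)
print("Listening for /touch and /clap on port 5005 ...")
server.serve_forever()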


@@ -3,7 +3,7 @@ from pythonosc import dispatcher, osc_server
 import threading
 # python test_touch_area.py
 SCREEN_WIDTH = 900
-SCREEN_HEIGHT = 600
+SCREEN_HEIGHT = 500
 # last touch point
 touch_pos = None