- added eyeFeature_kalibrierung
- added documentation for eyeTracking data
This commit is contained in:
parent
eba9b07487
commit
145a5ecf78
174
dataset_creation/camera_handling/eyeFeature_kalibrierung.py
Normal file
174
dataset_creation/camera_handling/eyeFeature_kalibrierung.py
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
import cv2
|
||||||
|
import mediapipe as mp
|
||||||
|
import numpy as np
|
||||||
|
import pyautogui
|
||||||
|
import pandas as pd
|
||||||
|
import time
|
||||||
|
from sklearn.pipeline import make_pipeline
|
||||||
|
from sklearn.preprocessing import PolynomialFeatures
|
||||||
|
from sklearn.linear_model import LinearRegression
|
||||||
|
|
||||||
|
# Screen size in pixels; used to convert normalized (0-1) calibration
# targets into pixel coordinates and to clamp/normalize predictions.
screen_w, screen_h = pyautogui.size()

# MediaPipe setup: refine_landmarks=True enables the additional iris
# landmarks needed below.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(refine_landmarks=True)
# Default webcam (device 0).
cap = cv2.VideoCapture(0)

# Iris landmark indices of the refined MediaPipe face mesh
# (left and right iris, 5 points each).
LEFT_IRIS = [468, 469, 470, 471, 472]
RIGHT_IRIS = [473, 474, 475, 476, 477]
|
||||||
|
|
||||||
|
def get_iris_center(landmarks, indices):
    """Return the (x, y) centroid of the landmarks selected by *indices*.

    landmarks: indexable sequence of objects exposing normalized ``.x``
        and ``.y`` attributes (e.g. MediaPipe face-mesh landmarks).
    indices: iterable of integer positions into *landmarks*.
    Returns a length-2 numpy array ``[mean_x, mean_y]``.
    """
    coords = np.array([(landmarks[i].x, landmarks[i].y) for i in indices])
    return coords.mean(axis=0)
|
||||||
|
|
||||||
|
# Calibration points as (x, y) fractions of the screen: a 3x3 grid
# covering corners, edge midpoints and center.
calibration_points = [
    (0.1,0.1),(0.5,0.1),(0.9,0.1),
    (0.1,0.5),(0.5,0.5),(0.9,0.5),
    (0.1,0.9),(0.5,0.9),(0.9,0.9)
]

# Per-eye calibration rows: [iris_x, iris_y, target_pixel_x, target_pixel_y].
left_data = []
right_data = []

print("Kalibrierung startet...")
|
||||||
|
|
||||||
|
# Calibration: show each grid point in turn, collect iris samples while
# the user fixates it, and record the averaged iris position per eye.
for idx, (px, py) in enumerate(calibration_points):

    # Draw all nine dots; the currently active target is red and larger.
    screen = np.zeros((screen_h, screen_w, 3), dtype=np.uint8)

    for j, (cpx, cpy) in enumerate(calibration_points):
        cx = int(cpx * screen_w)
        cy = int(cpy * screen_h)

        if j == idx:
            color = (0, 0, 255)
            radius = 25
        else:
            color = (255, 255, 255)
            radius = 15

        cv2.circle(screen, (cx, cy), radius, color, -1)

    # Show the calibration screen and give the user a second to fixate.
    cv2.namedWindow("Calibration", cv2.WINDOW_NORMAL)
    cv2.imshow("Calibration", screen)
    cv2.waitKey(1000)

    samples_left = []
    samples_right = []

    # Sample iris positions for 2 seconds.
    start = time.time()
    while time.time() - start < 2:
        ret, frame = cap.read()
        if not ret:
            # Grab failed: skip this iteration instead of crashing on a
            # None frame in cv2.flip below.
            continue
        frame = cv2.flip(frame, 1)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(rgb)

        if results.multi_face_landmarks:
            mesh = results.multi_face_landmarks[0].landmark

            left_center = get_iris_center(mesh, LEFT_IRIS)
            right_center = get_iris_center(mesh, RIGHT_IRIS)

            samples_left.append(left_center)
            samples_right.append(right_center)

    if not samples_left:
        # No face detected for this point: np.mean([]) would produce NaN
        # rows and poison the regression, so drop the point with a warning.
        print("Warning: no face detected for calibration point", idx, "- skipping")
        continue

    avg_left = np.mean(samples_left, axis=0)
    avg_right = np.mean(samples_right, axis=0)

    target_x = int(px * screen_w)
    target_y = int(py * screen_h)

    left_data.append([avg_left[0], avg_left[1], target_x, target_y])
    right_data.append([avg_right[0], avg_right[1], target_x, target_y])

cv2.destroyWindow("Calibration")
|
||||||
|
|
||||||
|
# Training
def train_model(data):
    """Fit one degree-2 polynomial regression per screen axis.

    data: rows of [iris_x, iris_y, target_pixel_x, target_pixel_y].
    Returns (model_x, model_y): sklearn pipelines mapping the two iris
    coordinates to the x resp. y screen-pixel coordinate.
    """
    arr = np.array(data)
    features = arr[:, :2]
    targets_x = arr[:, 2]
    targets_y = arr[:, 3]

    def _fit(targets):
        # Independent PolynomialFeatures(2) + LinearRegression per axis.
        pipeline = make_pipeline(PolynomialFeatures(2), LinearRegression())
        pipeline.fit(features, targets)
        return pipeline

    return _fit(targets_x), _fit(targets_y)
|
||||||
|
|
||||||
|
# Fit an (x, y) regressor pair per eye from the calibration samples.
model_lx, model_ly = train_model(left_data)
model_rx, model_ry = train_model(right_data)

print("Kalibrierung abgeschlossen. Tracking startet...")

# Data recording: one row of normalized gaze coordinates per tracked frame.
records = []
|
||||||
|
|
||||||
|
# Live tracking: predict a screen point per eye for every camera frame
# until the user presses 'q'.
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera stopped delivering frames: leave the loop instead of
        # crashing on a None frame in cv2.flip below.
        print("Camera delivers no frames - stopping tracking")
        break
    frame = cv2.flip(frame, 1)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results = face_mesh.process(rgb)

    if results.multi_face_landmarks:
        mesh = results.multi_face_landmarks[0].landmark

        left_center = get_iris_center(mesh, LEFT_IRIS)
        right_center = get_iris_center(mesh, RIGHT_IRIS)

        # One-sample batches for the sklearn pipelines.
        left_input = np.array([left_center])
        right_input = np.array([right_center])

        lx = model_lx.predict(left_input)[0]
        ly = model_ly.predict(left_input)[0]

        rx = model_rx.predict(right_input)[0]
        ry = model_ry.predict(right_input)[0]

        # Clamp predicted pixel coordinates to the screen area.
        lx = np.clip(lx, 0, screen_w)
        ly = np.clip(ly, 0, screen_h)
        rx = np.clip(rx, 0, screen_w)
        ry = np.clip(ry, 0, screen_h)

        # Normalize to 0-1 (fraction of screen size) for the CSV output.
        lx_norm = lx / screen_w
        ly_norm = ly / screen_h
        rx_norm = rx / screen_w
        ry_norm = ry / screen_h

        records.append([
            lx_norm, ly_norm,
            rx_norm, ry_norm
        ])

        print("L:", int(lx), int(ly), " | R:", int(rx), int(ry))

    cv2.imshow("Tracking", frame)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        print("q gedrückt – beende Tracking")
        break

cap.release()
cv2.destroyAllWindows()
|
||||||
|
|
||||||
|
# Persist the recorded normalized gaze points as CSV.
column_names = [
    "EYE_LEFT_GAZE_POINT_ON_DISPLAY_AREA_X",
    "EYE_LEFT_GAZE_POINT_ON_DISPLAY_AREA_Y",
    "EYE_RIGHT_GAZE_POINT_ON_DISPLAY_AREA_X",
    "EYE_RIGHT_GAZE_POINT_ON_DISPLAY_AREA_Y",
]
df = pd.DataFrame(records, columns=column_names)
df.to_csv("gaze_data1.csv", index=False)

print("Daten gespeichert als gaze_data1.csv")
|
||||||
@ -68,6 +68,24 @@ Runtime behavior:
|
|||||||
Operational note:
|
Operational note:
|
||||||
- `DB_PATH` and other paths are currently code-configured and must be adapted per deployment.
|
- `DB_PATH` and other paths are currently code-configured and must be adapted per deployment.
|
||||||
|
|
||||||
|
### 2.4 Two Approaches to Eye-Tracking Data Collection
|
||||||
|
|
||||||
|
Eye-tracking can be implemented using two main approaches:
|
||||||
|
|
||||||
|
Used Approach: Relative Iris Position
|
||||||
|
- Tracks the position of the pupil within the eye region
|
||||||
|
- The position is normalized relative to the eye itself
|
||||||
|
- No reference to the screen or physical environment is required
|
||||||
|
|
||||||
|
Not Used Approach: Screen Calibration
|
||||||
|
- Requires the user to look at 9 predefined points on the screen
|
||||||
|
- A mapping model is trained based on these points
|
||||||
|
- Establishes a relationship between eye movement and screen coordinates
|
||||||
|
|
||||||
|
Important Considerations (for both methods)
|
||||||
|
- Keep the head as still as possible
|
||||||
|
- Ensure consistent and even lighting conditions
|
||||||
|
|
||||||
## 3) EDA
|
## 3) EDA
|
||||||
The directory EDA provides several files to get insights into both the raw data from AdaBase and your own dataset.
|
The directory EDA provides several files to get insights into both the raw data from AdaBase and your own dataset.
|
||||||
|
|
||||||
@ -317,6 +335,7 @@ Otherwise, as described in `readme.md: Setup`, you can use `prediction_env.yaml`
|
|||||||
- `dataset_creation/camera_handling/eyeFeature_new.py` - eye-feature extraction from gaze parquet
|
- `dataset_creation/camera_handling/eyeFeature_new.py` - eye-feature extraction from gaze parquet
|
||||||
- `dataset_creation/camera_handling/db_helper.py` - SQLite helper functions (camera pipeline)
|
- `dataset_creation/camera_handling/db_helper.py` - SQLite helper functions (camera pipeline)
|
||||||
- `dataset_creation/camera_handling/camera_stream.py` - baseline camera streaming script
|
- `dataset_creation/camera_handling/camera_stream.py` - baseline camera streaming script
|
||||||
|
- `dataset_creation/camera_handling/eyeFeature_kalibrierung.py` - eye-feature extraction with calibration
|
||||||
- `dataset_creation/camera_handling/db_test.py` - DB test utility
|
- `dataset_creation/camera_handling/db_test.py` - DB test utility
|
||||||
|
|
||||||
### EDA
|
### EDA
|
||||||
@ -387,4 +406,3 @@ Otherwise, as described in `readme.md: Setup`, you can use `prediction_env.yaml`
|
|||||||
|
|
||||||
- Several paths are hardcoded on purpose to ensure compatibility with the Jetson board at the OHM-UX driving simulator.
|
- Several paths are hardcoded on purpose to ensure compatibility with the Jetson board at the OHM-UX driving simulator.
|
||||||
- Camera and AU processing are resource-intensive; version pinning and hardware validation are recommended.
|
- Camera and AU processing are resource-intensive; version pinning and hardware validation are recommended.
|
||||||
|
|
||||||
|
|||||||
@ -7,9 +7,9 @@ For full documentation, see [project_report.md](project_report.md).
|
|||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
### 1) Setup
|
### 1) Setup
|
||||||
Activate the conda-repository "".
|
Activate the conda environment "camera_stream_AU_ET_test".
|
||||||
```bash
|
```bash
|
||||||
conda activate
|
conda activate camera_stream_AU_ET_test
|
||||||
```
|
```
|
||||||
**Make sure, another environment that fulfills prediction_env.yaml is available**, matching with predict_pipeline/predict.service
|
**Make sure, another environment that fulfills prediction_env.yaml is available**, matching with predict_pipeline/predict.service
|
||||||
See `predict_pipeline/predict_service_timer_documentation.md`
|
See `predict_pipeline/predict_service_timer_documentation.md`
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user