diff --git a/src_folder/BackEnd/CameraDetection/Track.py b/src_folder/BackEnd/CameraDetection/Track.py
index 79a2496..5178363 100644
--- a/src_folder/BackEnd/CameraDetection/Track.py
+++ b/src_folder/BackEnd/CameraDetection/Track.py
@@ -1,6 +1,7 @@
 import numpy as np
 import cv2
+
 cap = cv2.VideoCapture(0)
 
 
 def nothing(x):
diff --git a/src_folder/BackEnd/camera.py b/src_folder/BackEnd/camera.py
index f2cfc01..edab2a0 100644
--- a/src_folder/BackEnd/camera.py
+++ b/src_folder/BackEnd/camera.py
@@ -9,17 +9,17 @@ class Camera():
         self.colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
         self.color_names = ["Rot", "Gruen", "Blau"]
 
-        self.lower_red = np.array([80, 160, 150])
-        self.upper_red = np.array([255, 255, 255])
+        self.lower_red = np.array([0, 185, 15])
+        self.upper_red = np.array([50, 255, 255])
 
-        self.lower_green = np.array([40, 50, 160])
-        self.upper_green = np.array([80, 255, 255])
+        self.lower_green = np.array([20, 70, 25])
+        self.upper_green = np.array([65, 200, 255])
 
-        self.lower_blue = np.array([95, 180, 90])
-        self.upper_blue = np.array([130, 255, 255])
+        self.lower_blue = np.array([100, 220, 5])
+        self.upper_blue = np.array([255, 255, 255])
 
         self.video = cv2.VideoCapture(0)
-        self.start_process = False
+        self.start_process = True
         self.correct_field_frame = 0
         self.mean_error = []
         self.scores = {'score_red': 0,
@@ -27,9 +27,9 @@ class Camera():
                        'score_blue': 0
                        }
 
-    def get_frame(self) -> np.ndarray:
+    def get_frame(self):
         try:
-            _, self.image = self.video.read(1)
+            _, self.image = self.video.read(0)
         except Exception as err:
             print("Can not capture the video..\n")
             print(err)
@@ -79,7 +79,7 @@ class Camera():
     def current_score(self, scores: dict):
         return scores
 
-    def range_of_interest(self, frame: np.ndarray, correct_field: int):
+    def range_of_interest(self, frame, correct_field: int):
         num_windows_in_y = 1
         num_windows_in_x = 3
 
@@ -105,31 +105,21 @@ class Camera():
             self.current_score[team_color] = round(error)
 
     def process(self):
-        my_camera = Camera()
         square_error = []
-        while my_camera.start_process:
-            frame = my_camera.get_frame()
-            interested_area = my_camera.range_of_interest(frame, my_camera.correct_field_frame)
-            my_camera.detect_color(interested_area)
+        for i in range(0,100):
+            frame = self.get_frame()
+            interested_area = self.range_of_interest(frame, self.correct_field_frame)
+            self.detect_color(interested_area)
 
             cv2.line(img=frame, pt1=(frame.shape[1]//3, 0), pt2=(frame.shape[1]//3, frame.shape[0]), color=(0, 0, 0), thickness=2)
             cv2.line(img=frame, pt1=(2 * frame.shape[1]//3, 0), pt2=(2 * frame.shape[1]//3, frame.shape[0]), color=(0, 0, 0), thickness=2)
             cv2.imshow("Kamera 1,2 oder 3", frame)
 
-            print(my_camera.scores)
-            square_error.append(my_camera.scores)
-
-            if not my_camera.start_process:
-                pass
-
-            if cv2.waitKey(1) & 0xFF == ord('q'):
-                my_camera.take_picture()
-                print(my_camera.scores)
-                break
-
-        my_camera.video.release()
-        cv2.destroyAllWindows()
+            square_error.append(self.scores)
+            self.take_picture()
+            print(f'{i}. Iteration scores: {self.scores}')
+        self.start_process = False
 
 
 # for testing only: set my_camera.start_process to True and choose correct_field_frame between 1 and 3
diff --git a/src_folder/BackEnd/router.py b/src_folder/BackEnd/router.py
index febbfe0..8f1280a 100644
--- a/src_folder/BackEnd/router.py
+++ b/src_folder/BackEnd/router.py
@@ -1,7 +1,6 @@
 from flask import Flask, jsonify, Response, request
 from camera import Camera
 from game import Game
-import time
 
 app = Flask(__name__)
 
@@ -29,9 +28,8 @@ def scoreboard():
 @app.route('/check', methods=['GET'])
 def check():
     my_camera.start_process = True
-    my_camera.process()
-    time.sleep(5)
     my_camera.correct_field_frame = my_game.field
+    my_camera.process()
     my_game.set_scoreboard(my_camera.scores)
     my_camera.start_process = False
     return jsonify(my_game.scoreboard)
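For context, a minimal sketch (not part of the PR) of how bounds like the retuned lower_red/upper_red above are typically applied with cv2.inRange. The diff does not show detect_color itself, so the HSV conversion, the window name, and the overall flow here are assumptions, not the project's actual implementation:

    import cv2
    import numpy as np

    # Retuned red bounds taken from the diff above; assumed to be matched
    # against an HSV image rather than raw BGR.
    lower_red = np.array([0, 185, 15])
    upper_red = np.array([50, 255, 255])

    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    if ok:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)          # convert BGR frame to HSV
        mask = cv2.inRange(hsv, lower_red, upper_red)          # binary mask of in-range pixels
        red_only = cv2.bitwise_and(frame, frame, mask=mask)    # keep only masked pixels
        cv2.imshow("red mask (sketch)", red_only)
        cv2.waitKey(0)
    cap.release()
    cv2.destroyAllWindows()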