first test for initial commit
This commit is contained in:
parent beab4b0111
commit f1759641d1

src_folder/BackEnd/CameraDetection/CameraDetection.py · 137 lines · Normal file
@@ -0,0 +1,137 @@
import cv2
import numpy as np
# from Track import nothing

# Color values for the detection (example: red, green, blue)
colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]
color_names = ["Rot", "Gruen", "Blau"]

# Color bounds for the detection
lower_red = np.array([0, 100, 100])
upper_red = np.array([10, 255, 255])

lower_green = np.array([40, 100, 100])
upper_green = np.array([70, 255, 255])

lower_blue = np.array([90, 100, 100])
upper_blue = np.array([130, 255, 255])


# Function for color detection
def erkennung_farben(img):
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    results = []

    count_red = 0
    count_green = 0
    count_blue = 0

    for i, color in enumerate(colors):
        if i == 0:
            lower = lower_red
            upper = upper_red
        elif i == 1:
            lower = lower_green
            upper = upper_green
        elif i == 2:
            lower = lower_blue
            upper = upper_blue

        mask = cv2.inRange(hsv_img, lower, upper)

        # Find the colored regions
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        center = None
        count = 0

        for contour in contours:
            if count < 3:
                if cv2.contourArea(contour) > 100:
                    # Compute the centroid of the contour
                    M = cv2.moments(contour)
                    if M["m00"] > 0:
                        cX = int(M["m10"] / M["m00"])
                        cY = int(M["m01"] / M["m00"])
                        center = (cX, cY)
                        count += 1
                        # Draw a bounding rectangle
                        x, y, w, h = cv2.boundingRect(contour)
                        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                        cv2.putText(img, color_names[i], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)

                        # Increment the per-color count
                        if i == 0:
                            count_red += 1
                        elif i == 1:
                            count_green += 1
                        elif i == 2:
                            count_blue += 1

        results.append(center)

    return img, results, count_red, count_green, count_blue

# Function to determine the position
def ermittle_position(results, img_width):
    positions = []

    for result in results:
        if result is None:
            position = "Nicht gefunden"
        else:
            x = result[0]
            if x < img_width / 3:
                position = "Rechts"
            elif x < 2 * img_width / 3:
                position = "Mitte"
            else:
                position = "Links"

        positions.append(position)

    return positions


# Main program
if __name__ == "__main__":
    # Open the video source (can also be an image file)
    video = cv2.VideoCapture(1)  # Use "0" here for the built-in camera

    while True:
        # Read a single frame from the video source
        ret, frame = video.read()

        # Error handling when no frame can be read
        if not ret:
            break

        # Detect the colors
        farben_img, ergebnisse, count_red, count_green, count_blue = erkennung_farben(frame)

        # Display the color counts
        cv2.putText(farben_img, f"Rot: {count_red}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, colors[0], 2)
        cv2.putText(farben_img, f"Gruen: {count_green}", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, colors[1], 2)
        cv2.putText(farben_img, f"Blau: {count_blue}", (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, colors[2], 2)

        # Determine the positions
        img_width = frame.shape[1]
        positionen = ermittle_position(ergebnisse, img_width)

        # Display the positions
        for i, position in enumerate(positionen):
            cv2.putText(farben_img, f"{color_names[i]}: {position}", (10, 150 + 30 * i),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, colors[i], 2)

        # Draw the dividing lines
        cv2.line(farben_img, (img_width // 3, 0), (img_width // 3, frame.shape[0]), (0, 0, 0), 2)
        cv2.line(farben_img, (2 * img_width // 3, 0), (2 * img_width // 3, frame.shape[0]), (0, 0, 0), 2)

        # Show the image
        cv2.imshow("Farberkennung", farben_img)

        # Press "q" to exit the loop
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the video source and close the windows
    video.release()
    cv2.destroyAllWindows()
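A note on the red bounds in this file: OpenCV's 8-bit hue channel only runs from 0 to 179, and red sits at both ends of that scale, so the single [0, 10] range above misses reds with hues around 170-179. A minimal sketch of a two-range red mask, assuming the same saturation/value bounds as the commit (the numbers are illustrative and would still need tuning with Track.py):

import cv2
import numpy as np

def red_mask(hsv_img):
    # Red wraps around the ends of OpenCV's 0-179 hue scale, so combine
    # a low-hue and a high-hue range into one mask. Bounds are examples only.
    mask_low = cv2.inRange(hsv_img, np.array([0, 100, 100]), np.array([10, 255, 255]))
    mask_high = cv2.inRange(hsv_img, np.array([170, 100, 100]), np.array([179, 255, 255]))
    return cv2.bitwise_or(mask_low, mask_high)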
src_folder/BackEnd/CameraDetection/Track.py · 44 lines · Normal file
@@ -0,0 +1,44 @@
import time
import numpy as np
import cv2

cap = cv2.VideoCapture(0)


def nothing(x):
    pass


cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)

while True:
    ret, frame = cap.read()
    if not ret:
        # stop if no frame could be read
        break
    frame = cv2.resize(frame, (640, 480))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    l_h = cv2.getTrackbarPos("L - H", "Trackbars")
    l_s = cv2.getTrackbarPos("L - S", "Trackbars")
    l_v = cv2.getTrackbarPos("L - V", "Trackbars")
    u_h = cv2.getTrackbarPos("U - H", "Trackbars")
    u_s = cv2.getTrackbarPos("U - S", "Trackbars")
    u_v = cv2.getTrackbarPos("U - V", "Trackbars")
    lower_range = np.array([l_h, l_s, l_v])
    upper_range = np.array([u_h, u_s, u_v])
    mask = cv2.inRange(hsv, lower_range, upper_range)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    # show thresholded image
    cv2.imshow("mask", mask)
    cv2.imshow("result", result)

    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
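Track.py serves as the calibration helper for the bounds used in CameraDetection.py: the six "L/U - H/S/V" trackbars adjust the lower and upper HSV range live until the mask isolates the target color (note the hue sliders run to 255 even though OpenCV's hue channel only uses 0-179). Once the sliders look right, the values can be copied into the corresponding lower_*/upper_* arrays in CameraDetection.py. A small sketch of that hand-off, with example numbers only, not values from the commit:

import numpy as np

# Slider values read off the "Trackbars" window once the mask isolates the
# target color (example numbers; they depend on camera and lighting).
l_h, l_s, l_v = 40, 100, 100
u_h, u_s, u_v = 70, 255, 255

# These would replace e.g. lower_green / upper_green in CameraDetection.py.
lower_green = np.array([l_h, l_s, l_v])
upper_green = np.array([u_h, u_s, u_v])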