Compare commits
No commits in common. "13549bb5947ad7303b51dedf3633034414015648" and "41ef2c4cb6999879056cc1218eaba77ac6b7a4ae" have entirely different histories.
Comparing 13549bb594...41ef2c4cb6
.gitignore (vendored, 5 changes)
@@ -7,7 +7,4 @@ camera/videos
 *.jpg
 *.h264
 *.mp4
 *.png
-.vscode/
-camera/.vscode/
-camera/.vscode/launch.json
camera/.vscode/launch.json (vendored, 3 changes)
@@ -1,7 +1,6 @@
 {
     "version": "0.2.0",
     "configurations": [
-
         {
             "name": "Python: Current File",
             "type": "python",
@@ -16,7 +15,7 @@
             "request": "launch",
             "program": "${file}",
             "console": "integratedTerminal",
-            "args": ["-v", "run.mp4"]
+            "args": ["-v", "~/Videos/video.h264"]
         }
     ]
 }
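Note: the only functional change here is the debugger argument, which now points at a sample H.264 clip instead of run.mp4. Debugging the current file in VS Code is then equivalent to running `python counter_people.py -v ~/Videos/video.h264` from a shell; since cv2.VideoCapture does not expand `~`, an absolute path may be safer.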
camera/counter_people.py (140 changes, Executable file → Normal file)
@@ -1,12 +1,11 @@
-#!/usr/bin/env python
-
-import argparse
-import numpy as np
-import cv2
-import imutils
 from imutils.object_detection import non_max_suppression
-from video_stream import imagezmq
+import numpy as np
+import imutils
+import cv2
+import time
+import argparse
+import time
+import base64
 
 '''
 Usage:
@@ -17,7 +16,6 @@ python peopleCounter.py -c # Attempts to detect people using webcam
 
 HOGCV = cv2.HOGDescriptor()
 HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
-VERBOSITY = False
 
 def detector(image):
     '''
@@ -26,7 +24,7 @@ def detector(image):
 
     clone = image.copy()
 
-    (rects, _) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
+    (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
 
     # draw the original bounding boxes
     for (x, y, w, h) in rects:
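Note: the rewritten call keeps the SVM confidence weights that detectMultiScale returns instead of discarding them. The script already imports non_max_suppression, so a natural follow-up (a sketch only, not part of this diff; the overlap threshold is an assumed value) would be to collapse overlapping boxes before drawing:

    import numpy as np
    from imutils.object_detection import non_max_suppression

    def suppress_overlaps(rects, overlap_thresh=0.65):
        # convert (x, y, w, h) boxes into (x1, y1, x2, y2) corners
        boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        # keep only the strongest non-overlapping detections
        return non_max_suppression(boxes, probs=None, overlapThresh=overlap_thresh)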
@@ -39,65 +37,54 @@ def detector(image):
 
     return result
 
-def args_parser():
-    ''' images, videos, remote or a local camera feed allowed
-    verbose for added debugging'''
-    ap = argparse.ArgumentParser()
-    ap.add_argument("-i", "--image", default=None,
-                    help="path to image test file directory")
-    ap.add_argument("-c", "--camera", action="store_true", default=False,
-                    help="Set as true if you wish to use the camera")
-    ap.add_argument("-v", "--video", default=None,
-                    help="path to the video file")
-    ap.add_argument("-r", "--remote", action="store_true", default=False,
-                    help="video comes from remote source via imagezmq")
-    ap.add_argument("--verbose", action="store_true", default=False,
-                    help="increase output verbosity")
-    args = vars(ap.parse_args())
-
-    if args["verbose"]:
-        VERBOSITY = True
+def buildPayload(variable, value, context):
+    return {variable: {"value": value, "context": context}}
+
+
+def argsParser():
+    ap = argparse.ArgumentParser()
+    ap.add_argument("-i", "--image", default=None, help="path to image test file directory")
+    ap.add_argument("-c", "--camera", default=False, help="Set as true if you wish to use the camera")
+    ap.add_argument("-v", "--video", default=None, help="path to the video file")
+    args = vars(ap.parse_args())
 
     return args
 
-def usage():
-    print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v] [-r REMOTE] [--verbose]")
-    print()
-    print("optional arguments:")
-    print("  -h, --help            show this help message and exit")
-    print("  -i IMAGE, --image IMAGE")
-    print("                        path to image test file directory")
-    print("  -c, --camera          Set as true if you wish to use the camera")
-    print("  -v, --video           path to the video file")
-    print("  -r REMOTE, --remote REMOTE")
-    print("                        video comes from remote source via imagezmq")
-    print("  --verbose             increase output verbosity")
-
 def localDetect(image_path):
     result = []
     image = cv2.imread(image_path)
     image = imutils.resize(image, width=min(400, image.shape[1]))
+    clone = image.copy()
     if len(image) <= 0:
         print("[ERROR] could not read local image")
         return result
     print("[INFO] Detecting people")
     result = detector(image)
 
-    if VERBOSITY:
-        # shows the result
-        for (xA, yA, xB, yB) in result:
-            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
+    """# shows the result
+    for (xA, yA, xB, yB) in result:
+        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
 
-        cv2.imshow("result", image)
-        cv2.waitKey(0)
-        cv2.destroyWindow("result")
+    cv2.imshow("result", image)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()
 
-    #cv2.imwrite("result.png", np.hstack((clone, image)))
+    cv2.imwrite("result.png", np.hstack((clone, image)))"""
     return result#(result, image)
 
 
-def videoDetect(cap):
-    while True:
+def cameraDetect(video_path="", sample_time=5):
+
+    if video_path:
+        cap = cv2.VideoCapture(video_path)
+    else:
+        cap = cv2.VideoCapture(0)
+
+    #init = time.time()
+
+    while(True):
         # Capture frame-by-frame
         _, frame = cap.read()
 
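Note: buildPayload, new in this revision, only shapes a reading into a nested dict keyed by the variable name. A hypothetical call (names and values invented for illustration):

    # e.g. after result = detector(image)
    payload = buildPayload("people", len(result), {"camera": "front-door"})
    # -> {"people": {"value": 2, "context": {"camera": "front-door"}}}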
@@ -110,15 +97,14 @@ def videoDetect(cap):
         # shows the result
         for (xA, yA, xB, yB) in result:
             cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
-        if VERBOSITY:
-            cv2.imshow('frame', frame)
-            cv2.waitKey(0)
+        cv2.imshow('frame', frame)
+        cv2.waitKey(0)
 
         #if time.time() - init >= sample_time:
-        if result:
+        if len(result):
             print("{} people detected.".format(len(result)))
             #init = time.time()
 
 
         if cv2.waitKey(1) & 0xFF == ord('q'):
             break
@@ -127,52 +113,40 @@ def videoDetect(cap):
     cap.release()
     cv2.destroyAllWindows()
 
-def remoteDetect(image_hub):
-    while True:
-        rpi_name, image = image_hub.recv_image()
-        cv2.imshow(rpi_name, image) # 1 window for each RPi
-        cv2.waitKey(1)
-        image_hub.send_reply(b'OK')
+def convert_to_base64(image):
+    image = imutils.resize(image, width=400)
+    img_str = cv2.imencode('.png', image)[1].tostring()
+    b64 = base64.b64encode(img_str)
+
+    return b64.decode('utf-8')
 
 
 def detectPeople(args):
     image_path = args["image"]
     video_path = args["video"]
-    camera = True if args["camera"] else False
-    remote = True if args["remote"] else False
+    camera = True if str(args["camera"]) == 'true' else False
 
     # Routine to read local image
-    if image_path is not None:
+    if image_path != None and not camera and video_path == None:
         print("[INFO] Image path provided, attempting to read image")
         (result, image) = localDetect(image_path)
         print(str(len(result)) + " People detected.")
 
-    elif video_path is not None:
-        print("[INFO] Video path provided, reading video")
-        cap = cv2.VideoCapture(video_path)
-        videoDetect(cap)
+    if video_path != None and not camera:
+        print("[INFO] reading video")
+        cameraDetect(video_path)
 
     # Routine to read images from webcam
-    elif camera:
-        print("[INFO] Reading images from local camera")
-        cap = cv2.VideoCapture(0)
-        videoDetect(cap)
-
-    elif remote:
-        print("[INFO] Reading images from remote stream")
-        image_hub = imagezmq.ImageHub()
-        remoteDetect(image_hub)
-
-    else:
-        usage()
+    if camera:
+        print("[INFO] reading camera images")
+        cameraDetect()
 
 
 def main():
-    args = args_parser()
+    args = argsParser()
     detectPeople(args)
 
 
 if __name__ == '__main__':
     main()
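Note: convert_to_base64 encodes a resized frame as PNG and returns it as a UTF-8 base64 string. A sketch of the inverse, useful for sanity-checking the encoding (assumes only base64, numpy, and cv2):

    import base64
    import cv2
    import numpy as np

    def base64_to_image(b64_string):
        # undo convert_to_base64: base64 -> raw PNG bytes -> OpenCV image
        img_bytes = base64.b64decode(b64_string)
        img_array = np.frombuffer(img_bytes, dtype=np.uint8)
        return cv2.imdecode(img_array, cv2.IMREAD_COLOR)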
(deleted file, 232 lines; file name not shown in this compare view)
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-#from datetime import datetime, time
-import time
-from statistics import median
-
-import imutils
-from imutils.video import VideoStream
-#from imutils.video import FPS
-
-import cv2
-import numpy as np
-
-frame_timer = None
-contour_timer = None
-detection_timer = None
-
-frame_time = []
-contour_time = []
-detection_time = []
-
-VISUAL_DEBUG = True
-
-def getArgs():
-    """ Arguments """
-    ap = argparse.ArgumentParser()
-    ap.add_argument("-v", "--video", help="path to the video file")
-    ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
-    ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
-    return vars(ap.parse_args())
-
-
-def main():
-    args = getArgs()
-    timer = Timer()
-
-    # if the video argument is None, then the code will read from webcam (work in progress)
-    if args.get("video", None) is None:
-        vs = VideoStream(src=0).start()
-        time.sleep(2.0)
-    # otherwise, we are reading from a video file
-    else:
-        vs = cv2.VideoCapture(args["video"])
-
-    cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
-    detector = DetectionFromFrame(args["min_area"], 0.5)
-    while True:
-        timer.start_frame_timer()
-        detector.currentFrame = vs.read()
-        detector.currentFrame = detector.currentFrame if args.get("video", None) is None else detector.currentFrame[1]
-        # if the frame can not be grabbed, then we have reached the end of the video
-        if detector.currentFrame is None:
-            break
-
-        # resize the frame to 500
-        detector.currentFrame = imutils.resize(detector.currentFrame, width=500)
-        detector.framecounter += 1
-        if detector.framecounter > 1:
-            cnts = detector.prepareFrame()
-
-            for c in cnts:
-                timer.start_contour_timer()
-                bound_rect = cv2.boundingRect(c)
-                #(x, y, w, h) = cv2.boundingRect(c)
-                #initBB2 =(x,y,w,h)
-
-                prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
-                prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
-                net = cv2.dnn.readNetFromCaffe(prott1, prott2)
-
-                #trackbox = detector.currentFrame[y:y+h, x:x+w]boundRect[1]
-                trackbox = detector.currentFrame[bound_rect[1]:bound_rect[1]+bound_rect[3],
-                                                 bound_rect[0]:bound_rect[0]+bound_rect[2]]
-                trackbox = cv2.resize(trackbox, (224, 224))
-                #cv2.imshow('image',trackbox)
-                timer.start_detection_timer()
-                blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
-                net.setInput(blob)
-                detections = net.forward()
-
-                for i in np.arange(0, detections.shape[2]):
-
-                    detector.detectConfidentiallyPeople(i, detections, bound_rect)
-                timer.stop_detection_timer()
-
-                cv2.rectangle(detector.currentFrame, (bound_rect[0], bound_rect[1]),
-                              (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), (255, 255, 0), 1)
-
-                timer.stop_contour_timer()
-
-
-        # show the frame and record if the user presses a key
-        cv2.imshow("Video stream", detector.currentFrame)
-        key = cv2.waitKey(1) & 0xFF
-
-        # if the `q` key is pressed, break from the lop
-        if key == ord("q"):
-            break
-        if key == ord("d"):
-            detector.firstFrame = None
-        #detector.lastFrame = detector.currentFrame
-
-        timer.print_time()
-
-    # finally, stop the camera/stream and close any open windows
-    vs.stop() if args.get("video", None) is None else vs.release()
-    cv2.destroyAllWindows()
-
-class Timer:
-    def __init__(self):
-        self.frame_timer = None
-        self.contour_timer = None
-        self.detection_timer = None
-
-        self.contour_time = []
-        self.detection_time = []
-
-    def start_frame_timer(self):
-        self.frame_timer = time.time()
-
-    def get_frame_time(self):
-        return time.time() - self.frame_timer
-
-    def start_contour_timer(self):
-        self.contour_timer = time.time()
-
-    def stop_contour_timer(self):
-        self.contour_time.append(time.time() - self.contour_timer)
-
-    def start_detection_timer(self):
-        self.detection_timer = time.time()
-
-    def stop_detection_timer(self):
-        self.detection_time.append(time.time() - self.detection_timer)
-
-    def print_time(self):
-        average_contour = 0 if not self.contour_time else sum(self.contour_time)/float(len(self.contour_time))
-        average_detection = 0 if not self.detection_time else sum(self.detection_time)/float(len(self.detection_time))
-
-        median_contour = 0 if not self.contour_time else median(self.contour_time)
-        median_detection = 0 if not self.detection_time else median(self.detection_time)
-
-        total_contour = sum(self.contour_time)
-        total_detection = sum(self.detection_time)
-
-        print("Time for Frame: {:.2f}. Contour Total: {:.2f}. Contour Median: {:.2f}. Contour Average: {:.2f}. Detection Total: {:.2f}. Detection Median: {:.2f}. Detection Average: {:.2f}. ".format(
-            self.get_frame_time(), total_contour, median_contour, average_contour, total_detection, median_detection, average_detection))
-        #print("Contour Times:" + str(timer.contour_time))
-        #print("Detection Times:" + str(timer.detection_time))
-        self.contour_time = []
-        self.detection_time = []
-
-class DetectionFromFrame:
-    def __init__(self, min_size, confidence):
-        self.min_size = min_size
-        self.confidence_level = confidence
-
-        self.firstFrame = None
-        self.currentFrame = None
-
-        self.initBB2 = None
-        self.fps = None
-        self.differ = None
-        self.now = ''
-        self.framecounter = 0
-        self.people_count_total = 0
-
-
-    def prepareFrame(self):
-        gray = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
-        gray = cv2.GaussianBlur(gray, (21, 21), 0)
-
-        # if the first frame is None, initialize it
-        if self.firstFrame is None:
-            self.firstFrame = gray
-            return []
-
-        # compute the absolute difference between the current frame and first frame
-        frameDelta = cv2.absdiff(self.firstFrame, gray)
-        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
-
-        #debug
-        """if VISUAL_DEBUG:
-            cv2.imshow("debug image", thresh)
-            cv2.waitKey(0)
-            cv2.destroyWindow("debug image")
-            #cv2.destroyWindow("threshhold image")"""
-
-        # dilate the thresholded image to fill in holes
-        thresh = cv2.dilate(thresh, None, iterations=2)
-
-        # find contours on thresholded image
-        thresh = np.uint8(thresh)
-        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
-        return cnts
-
-    def detectConfidentiallyPeople(self, i, detections, bound_rect):
-        #CLASSES = ["person"]
-
-        detected_color = (0, 255, 0)
-        #COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
-
-        confidence = detections[0, 0, i, 2]
-
-        if confidence > self.confidence_level:
-            # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
-            # the bounding box for the object
-            #idx = int(detections[0, 0, i, 1])
-            #box = detections[0, 0, i, 3:7] * np.array([bound_rect[2], bound_rect[3], bound_rect[2], bound_rect[3]])
-            #(startX, startY, endX, endY) = box.astype("int")
-            # draw the prediction on the frame
-
-            #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
-            label = "{:.2f}%".format(confidence * 100)
-
-            #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
-            cv2.rectangle(self.currentFrame, (bound_rect[0], bound_rect[1]),
-                          (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), detected_color, 3)
-
-            y = bound_rect[1] - 15 if bound_rect[1] - 15 > 15 else bound_rect[1] + 15
-
-            #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
-            cv2.putText(self.currentFrame, label, (bound_rect[0], bound_rect[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, detected_color, 1)
-            #cv2.imshow("Video stream", self.currentFrame)
-            #print("Person found")
-
-
-
-if __name__ == "__main__":
-    main()
(imagezmq receiver script; file name not shown in this compare view)
@@ -7,5 +7,4 @@ while True:  # show streamed images until Ctrl-C
     rpi_name, image = image_hub.recv_image()
     cv2.imshow(rpi_name, image) # 1 window for each RPi
     cv2.waitKey(1)
     image_hub.send_reply(b'OK')
-
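Note: this receive loop pairs with imagezmq's documented ImageSender API on the camera side. A minimal sender sketch (host name and port are placeholders):

    import socket
    import cv2
    import imagezmq

    sender = imagezmq.ImageSender(connect_to='tcp://server-hostname:5555')
    rpi_name = socket.gethostname()  # shown as the window title by the receiver
    cap = cv2.VideoCapture(0)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        sender.send_image(rpi_name, frame)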