Final versions of camera-tools
parent 949649c9a9
commit 7a99fdc45c
@@ -17,6 +17,7 @@ python peopleCounter.py -c # Attempts to detect people using webcam
 HOGCV = cv2.HOGDescriptor()
 HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
 #HOGCV.set
+VERBOSITY = False

 def detector(image):
@@ -26,7 +27,9 @@ def detector(image):

     clone = image.copy()

-    (rects, _) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
+    (rects, _) = HOGCV.detectMultiScale(image, winStride=(2, 2), padding=(8, 8), scale=1.05)

     # draw the original bounding boxes
     for (x, y, w, h) in rects:
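The winStride change above tightens the HOG sliding-window stride from (4, 4) to (2, 2), trading throughput for recall. A minimal, self-contained sketch of the same default-people-detector setup; the input file name is hypothetical:

# Hedged sketch of OpenCV's default HOG pedestrian detector, as used above.
# "people.jpg" is a hypothetical input; a smaller winStride scans more window
# positions per frame, which raises recall but lowers the frame rate.
import cv2

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

image = cv2.imread("people.jpg")
rects, weights = hog.detectMultiScale(image, winStride=(2, 2), padding=(8, 8), scale=1.05)
for (x, y, w, h) in rects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("people_out.jpg", image)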
@@ -129,11 +132,32 @@ def videoDetect(cap):

 def remoteDetect(image_hub):
     while True:
-        rpi_name, image = image_hub.recv_image()
-        cv2.imshow(rpi_name, image) # 1 window for each RPi
-        cv2.waitKey(1)
+        rpi_name, frame = image_hub.recv_image()
+        image_hub.send_reply(b'OK')
+
+        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
+        result = detector(frame.copy())
+
+        # show the result
+        for (xA, yA, xB, yB) in result:
+            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
+
+        #if VERBOSITY:
+        cv2.imshow('frame', frame)
+        #cv2.waitKey(0)
+
+        #if time.time() - init >= sample_time:
+        if len(result):
+            print("{} people detected.".format(len(result)))
+            #init = time.time()
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+        #cv2.imshow(rpi_name, frame) # 1 window for each RPi
+        #cv2.waitKey(1)


 def detectPeople(args):
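remoteDetect above is the receiving (hub) half of imagezmq's request/reply pattern: recv_image blocks until a sender pushes a frame, and send_reply(b'OK') releases that sender for the next frame. A sketch of the matching Raspberry Pi sender, assuming the standard imagezmq package (this repo vendors it as video_stream.imagezmq) and a placeholder hub address:

# Hedged sketch of the sender side that pairs with remoteDetect's ImageHub,
# assuming imagezmq's default REQ/REP mode. Replace the address below with
# the machine running the hub; the port is imagezmq's default.
import socket
import time
import cv2
import imagezmq

sender = imagezmq.ImageSender(connect_to="tcp://192.168.0.2:5555")
rpi_name = socket.gethostname()  # the hub uses this name as the window title

cap = cv2.VideoCapture(0)
time.sleep(2.0)  # camera warm-up
while True:
    ok, frame = cap.read()
    if not ok:
        break
    sender.send_image(rpi_name, frame)  # blocks until the hub replies b'OK'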
@@ -1,52 +1,58 @@
 #!/usr/bin/env python

 import argparse
 #from datetime import datetime, time
 import time
 from statistics import median

 import imutils
 from imutils.video import VideoStream
 #from imutils.video import FPS

 import cv2
 import numpy as np

-frame_timer = None
-contour_timer = None
-detection_timer = None
-
-frame_time = []
-contour_time = []
-detection_time = []
+import paho.mqtt.client as mqtt
+from video_stream import imagezmq

 VISUAL_DEBUG = True
+BROKER = "141.75.33.126"
+PORT = 1883

 def getArgs():
     """ Arguments """
     ap = argparse.ArgumentParser()
     ap.add_argument("-v", "--video", help="path to the video file")
     ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
     ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
     return vars(ap.parse_args())

 def main():
+    try:
+        mqtt_client = mqtt.Client("pi-camera")
+        mqtt_client.connect(BROKER, PORT)
+    except:
+        print("Connection to MQTT-Broker failed.")
+        return 1
+
+    try:
         args = getArgs()
         timer = Timer()

         # if the video argument is None, then the code will read from webcam (work in progress)
         if args.get("video", None) is None:
-            vs = VideoStream(src=0).start()
+            #vs = VideoStream(src=0).start()
+            image_hub = imagezmq.ImageHub()

             time.sleep(2.0)
         # otherwise, we are reading from a video file
         else:
             vs = cv2.VideoCapture(args["video"])

         cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
-        detector = DetectionFromFrame(args["min_area"], 0.5)
+        detector = DetectionFromFrame(args["min_area"], 0.8)
         while True:
             people_count = 0
             timer.start_frame_timer()
+            if args.get("video", None) is None:
+                rpi_name, detector.currentFrame = image_hub.recv_image()
+                image_hub.send_reply(b'OK')
+            else:
+                detector.currentFrame = vs.read()
             detector.currentFrame = detector.currentFrame if args.get("video", None) is None else detector.currentFrame[1]
             # if the frame can not be grabbed, then we have reached the end of the video
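main() now connects to an MQTT broker before entering the frame loop, and later publishes the per-frame people count. A standalone sketch of that publish path using paho-mqtt 1.x, reusing the broker address and topic from the diff; the client id is a made-up example:

# Hedged sketch of the connect-then-publish flow in main(), assuming
# paho-mqtt 1.x (2.x changed the Client constructor). Broker and topic
# are the values from the diff; the payload here is a dummy count.
import paho.mqtt.client as mqtt

BROKER = "141.75.33.126"
PORT = 1883

client = mqtt.Client("pi-camera-test")
try:
    client.connect(BROKER, PORT)
except OSError:
    raise SystemExit("Connection to MQTT broker failed.")

client.publish("/gso/bb/104/Camera", str(3))  # payload: people count as text
client.disconnect()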
@@ -57,10 +63,10 @@ def main():
             detector.currentFrame = imutils.resize(detector.currentFrame, width=500)
             detector.framecounter += 1
             if detector.framecounter > 1:

                 cnts = detector.prepareFrame()

                 for c in cnts:
                     timer.start_contour_timer()
                     bound_rect = cv2.boundingRect(c)
                     #(x, y, w, h) = cv2.boundingRect(c)
                     #initBB2 = (x, y, w, h)
@@ -74,26 +80,23 @@ def main():
                         bound_rect[0]:bound_rect[0]+bound_rect[2]]
                     trackbox = cv2.resize(trackbox, (224, 224))
                     #cv2.imshow('image', trackbox)
                     timer.start_detection_timer()
                     blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)), 0.007843, (300, 300), 127.5)
                     net.setInput(blob)
                     detections = net.forward()

                     for i in np.arange(0, detections.shape[2]):
-                        detector.detectConfidentiallyPeople(i, detections, bound_rect)
+                        people_count += detector.detectConfidentiallyPeople(i, detections, bound_rect)
                     timer.stop_detection_timer()

                     cv2.rectangle(detector.currentFrame, (bound_rect[0], bound_rect[1]),
                         (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), (255, 255, 0), 1)

                     timer.stop_contour_timer()

             # show the frame and record if the user presses a key
             cv2.imshow("Video stream", detector.currentFrame)
             key = cv2.waitKey(1) & 0xFF

+            # send number of people detected via mqtt
+            mqtt_client.publish("/gso/bb/104/Camera", str(people_count))

             # if the `q` key is pressed, break from the loop
             if key == ord("q"):
                 break
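The blob parameters above (1/127.5 scale factor, 300x300 input, mean 127.5) match the widely used MobileNet-SSD Caffe model, but the diff never shows where net is loaded. A loading sketch under that assumption; the model file names are hypothetical:

# Hedged sketch of loading the detector that main() assumes as `net`.
# The blobFromImage parameters in the diff fit the common MobileNet-SSD
# Caffe model; the .prototxt/.caffemodel file names are hypothetical.
import cv2
import numpy as np

net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel")

frame = cv2.imread("people.jpg")
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                             0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()  # shape: (1, 1, N, 7) per detection row

for i in np.arange(0, detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.8:  # same threshold the commit raises the detector to
        print("detection {} at confidence {:.2f}".format(i, confidence))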
@@ -101,55 +104,17 @@ def main():
             detector.firstFrame = None
             #detector.lastFrame = detector.currentFrame

-            timer.print_time()
+            timer.print_frame_time()

         # finally, stop the camera/stream and close any open windows
         if args.get("video", None) is not None:
             vs.stop() if args.get("video", None) is None else vs.release()

         cv2.destroyAllWindows()

-class Timer:
-    def __init__(self):
-        self.frame_timer = None
-        self.contour_timer = None
-        self.detection_timer = None
-
-        self.contour_time = []
-        self.detection_time = []
-
-    def start_frame_timer(self):
-        self.frame_timer = time.time()
-
-    def get_frame_time(self):
-        return time.time() - self.frame_timer
-
-    def start_contour_timer(self):
-        self.contour_timer = time.time()
-
-    def stop_contour_timer(self):
-        self.contour_time.append(time.time() - self.contour_timer)
-
-    def start_detection_timer(self):
-        self.detection_timer = time.time()
-
-    def stop_detection_timer(self):
-        self.detection_time.append(time.time() - self.detection_timer)
-
-    def print_time(self):
-        average_contour = 0 if not self.contour_time else sum(self.contour_time)/float(len(self.contour_time))
-        average_detection = 0 if not self.detection_time else sum(self.detection_time)/float(len(self.detection_time))
-
-        median_contour = 0 if not self.contour_time else median(self.contour_time)
-        median_detection = 0 if not self.detection_time else median(self.detection_time)
-
-        total_contour = sum(self.contour_time)
-        total_detection = sum(self.detection_time)
-
-        print("Time for Frame: {:.2f}. Contour Total: {:.2f}. Contour Median: {:.2f}. Contour Average: {:.2f}. Detection Total: {:.2f}. Detection Median: {:.2f}. Detection Average: {:.2f}.".format(
-            self.get_frame_time(), total_contour, median_contour, average_contour, total_detection, median_detection, average_detection))
-        #print("Contour Times:" + str(timer.contour_time))
-        #print("Detection Times:" + str(timer.detection_time))
-        self.contour_time = []
-        self.detection_time = []
+    finally:
+        if args.get("video", None) is None:
+            image_hub.send_reply(b'OK')

 class DetectionFromFrame:
     def __init__(self, min_size, confidence):
@@ -205,27 +170,65 @@ class DetectionFromFrame:
         confidence = detections[0, 0, i, 2]

         if confidence > self.confidence_level:
             # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
             # the bounding box for the object
             #idx = int(detections[0, 0, i, 1])
             #box = detections[0, 0, i, 3:7] * np.array([bound_rect[2], bound_rect[3], bound_rect[2], bound_rect[3]])
             #(startX, startY, endX, endY) = box.astype("int")
             # draw the prediction on the frame

             #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
             label = "{:.2f}%".format(confidence * 100)

             #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
             # draw a rectangle in green over the detected area
             cv2.rectangle(self.currentFrame, (bound_rect[0], bound_rect[1]),
                 (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), detected_color, 3)

             y = bound_rect[1] - 15 if bound_rect[1] - 15 > 15 else bound_rect[1] + 15

             #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
             label = "{:.2f}%".format(confidence * 100)
             cv2.putText(self.currentFrame, label, (bound_rect[0], bound_rect[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, detected_color, 1)
             #cv2.imshow("Video stream", self.currentFrame)
             #print("Person found")

+            return 1
+        else:
+            return 0

+class Timer:
+    def __init__(self):
+        self.frame_timer = None
+        self.contour_timer = None
+        self.detection_timer = None
+
+        self.contour_time = []
+        self.detection_time = []
+
+    def start_frame_timer(self):
+        self.frame_timer = time.time()
+
+    def get_frame_time(self):
+        return time.time() - self.frame_timer
+
+    def start_contour_timer(self):
+        self.contour_timer = time.time()
+
+    def stop_contour_timer(self):
+        self.contour_time.append(time.time() - self.contour_timer)
+
+    def start_detection_timer(self):
+        self.detection_timer = time.time()
+
+    def stop_detection_timer(self):
+        self.detection_time.append(time.time() - self.detection_timer)
+
+    def print_frame_time(self):
+        print("Time for Frame: {:.2f}.".format(self.get_frame_time()))
+
+    def print_other_times(self):
+        average_contour = 0 if not self.contour_time else sum(self.contour_time)/float(len(self.contour_time))
+        average_detection = 0 if not self.detection_time else sum(self.detection_time)/float(len(self.detection_time))
+
+        median_contour = 0 if not self.contour_time else median(self.contour_time)
+        median_detection = 0 if not self.detection_time else median(self.detection_time)
+
+        total_contour = sum(self.contour_time)
+        total_detection = sum(self.detection_time)
+
+        print("Contour Total: {:.2f}. Contour Median: {:.2f}. Contour Average: {:.2f}.".format(
+            total_contour, median_contour, average_contour))
+        print("Detection Total: {:.2f}. Detection Median: {:.2f}. Detection Average: {:.2f}.".format(
+            total_detection, median_detection, average_detection))
+
+        self.contour_time = []
+        self.detection_time = []

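The relocated Timer class collects per-contour and per-detection wall-clock samples and clears them after printing, so each report covers one frame's work. A short usage sketch, assuming the class as defined above:

# Usage sketch for Timer: time one frame containing two contours,
# each with one detection pass. print_other_times() also clears the
# collected samples, so the next frame starts fresh.
timer = Timer()
timer.start_frame_timer()
for _ in range(2):
    timer.start_contour_timer()
    timer.start_detection_timer()
    timer.stop_detection_timer()
    timer.stop_contour_timer()
timer.print_frame_time()
timer.print_other_times()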
 if __name__ == "__main__":