#!/usr/bin/env python

import argparse
import base64
import time

import cv2
import imutils
import numpy as np
from imutils.object_detection import non_max_suppression

from video_stream import imagezmq

'''
Usage:
    counter_people.py [-h] [-i IMAGE] [-c] [-v VIDEO] [-r] [--verbose]
'''

# HOG descriptor using OpenCV's built-in, pre-trained people detector.
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

VERBOSITY = False


def detector(image):
    '''
    Detect people in a BGR image (numpy array) and return the bounding boxes
    that survive non-maximum suppression.
    '''
    # NOTE: the detection body was missing here; this is the standard OpenCV
    # HOG people-detector call with commonly used parameters.
    (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4),
                                              padding=(8, 8), scale=1.05)

    # Convert (x, y, w, h) boxes to corner coordinates and merge overlapping
    # detections.
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    result = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    return result
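

# Example (illustrative; "people.jpg" is a placeholder file name):
#
#   frame = cv2.imread("people.jpg")
#   frame = imutils.resize(frame, width=min(400, frame.shape[1]))
#   boxes = detector(frame)  # N x 4 array of (xA, yA, xB, yB) corners
#   print("{} people detected.".format(len(boxes)))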


def buildPayload(variable, value, context):
    # Package a value and its context under a single variable name, e.g. for
    # posting the people count (plus a snapshot) to a REST/IoT backend.
    return {variable: {"value": value, "context": context}}
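

# Example (illustrative) of the payload shape produced above:
#
#   buildPayload("people", 3, {"status": "ok"})
#   # -> {"people": {"value": 3, "context": {"status": "ok"}}}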


def argsParser():
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", default=None,
                    help="path to image test file directory")
    ap.add_argument("-c", "--camera", action="store_true", default=False,
                    help="set this flag if you wish to use the camera")
    ap.add_argument("-v", "--video", default=None,
                    help="path to the video file")
    ap.add_argument("-r", "--remote", action="store_true", default=False,
                    help="video comes from a remote source via imagezmq")
    ap.add_argument("--verbose", action="store_true", default=False,
                    help="increase output verbosity")
    args = vars(ap.parse_args())

    # Declare the module-level flag; without this the assignment below would
    # only create a local variable.
    global VERBOSITY
    if args["verbose"]:
        VERBOSITY = True

    return args
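

# Example invocations (illustrative; file names are placeholders):
#
#   python counter_people.py -i test_image.jpg --verbose
#   python counter_people.py -v footage.mp4
#   python counter_people.py -c
#   python counter_people.py -r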


def usage():
    print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v VIDEO] [-r] [--verbose]")
    print()
    print("optional arguments:")
    print("  -h, --help            show this help message and exit")
    print("  -i IMAGE, --image IMAGE")
    print("                        path to image test file directory")
    print("  -c, --camera          set this flag if you wish to use the camera")
    print("  -v VIDEO, --video VIDEO")
    print("                        path to the video file")
    print("  -r, --remote          video comes from a remote source via imagezmq")
    print("  --verbose             increase output verbosity")


def localDetect(image_path):
    result = []
    image = cv2.imread(image_path)
    if image is None:
        # cv2.imread returns None when the file cannot be read.
        print("[ERROR] could not read local image")
        return (result, image)

    image = imutils.resize(image, width=min(400, image.shape[1]))
    clone = image.copy()

    print("[INFO] Detecting people")
    result = detector(image)

    if VERBOSITY:
        # shows the result
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

        cv2.imshow("result", image)
        cv2.waitKey(0)
        cv2.destroyWindow("result")

    #cv2.imwrite("result.png", np.hstack((clone, image)))
    return (result, image)


def videoDetect(cap):
    # Process frames from an already-opened cv2.VideoCapture (video file or
    # local camera).
    #init = time.time()
    while True:
        # Capture frame-by-frame
        _, frame = cap.read()
        if frame is None:
            # End of stream or camera read failure.
            break

        result = detector(frame)

        # shows the result
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        if VERBOSITY:
            cv2.imshow('frame', frame)
            cv2.waitKey(0)

        #if time.time() - init >= sample_time:
        if len(result) > 0:
            print("{} people detected.".format(len(result)))
            #init = time.time()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


def convert_to_base64(image):
    # Shrink the frame, PNG-encode it and return it as a base64 string so it
    # can travel inside a JSON payload.
    image = imutils.resize(image, width=400)
    img_str = cv2.imencode('.png', image)[1].tobytes()
    b64 = base64.b64encode(img_str)
    return b64.decode('utf-8')
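

# Example (illustrative): attaching a snapshot to the payload built earlier.
# "people.jpg" is a placeholder file name.
#
#   boxes, annotated = localDetect("people.jpg")
#   payload = buildPayload("people", len(boxes),
#                          {"image": convert_to_base64(annotated)})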


def remoteDetect(image_hub):
    # Receive frames pushed by remote imagezmq senders (e.g. Raspberry Pis),
    # display them and acknowledge every frame.
    while True:
        rpi_name, image = image_hub.recv_image()
        cv2.imshow(rpi_name, image)  # 1 window for each RPi
        cv2.waitKey(1)
        image_hub.send_reply(b'OK')
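

# Sender-side sketch (illustrative, assuming the bundled imagezmq module
# exposes the usual ImageSender API; host name and capture helper are
# placeholders):
#
#   sender = imagezmq.ImageSender(connect_to='tcp://counter-host:5555')
#   while True:
#       frame = grab_frame_somehow()
#       sender.send_image('rpi-kitchen', frame)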


def detectPeople(args):
    image_path = args["image"]
    video_path = args["video"]
    camera = True if args["camera"] else False
    remote = True if args["remote"] else False

    # Routine to read local image
    if image_path is not None:
        print("[INFO] Image path provided, attempting to read image")
        (result, image) = localDetect(image_path)
        print(str(len(result)) + " People detected.")

    # Routine to read frames from a video file
    elif video_path is not None:
        print("[INFO] Video path provided, reading video")
        cap = cv2.VideoCapture(video_path)
        videoDetect(cap)

    # Routine to read images from webcam
    elif camera:
        print("[INFO] Reading images from local camera")
        cap = cv2.VideoCapture(0)
        videoDetect(cap)

    # Routine to read images from a remote imagezmq stream
    elif remote:
        print("[INFO] Reading images from remote stream")
        image_hub = imagezmq.ImageHub()
        remoteDetect(image_hub)

    else:
        usage()
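

# Example (illustrative): calling detectPeople without going through argparse.
# "people.jpg" is a placeholder file name.
#
#   detectPeople({"image": "people.jpg", "video": None,
#                 "camera": False, "remote": False})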


def main():
    args = argsParser()
    detectPeople(args)


if __name__ == '__main__':
    main()