#!/usr/bin/env python

import argparse

import numpy as np
import cv2
import imutils
from imutils.object_detection import non_max_suppression

from video_stream import imagezmq

'''
Usage:

python peopleCounter.py -i PATH_TO_IMAGE   # Reads and detects people in a single locally stored image
python peopleCounter.py -c                 # Attempts to detect people using the webcam
python peopleCounter.py -v PATH_TO_VIDEO   # Detects people in a local video file
python peopleCounter.py -r                 # Detects people in frames streamed via imagezmq
'''

# HOG descriptor initialised with OpenCV's default pre-trained people detector
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

VERBOSITY = False


def detector(image):
    '''
    Runs the HOG people detector on @image (a BGR numpy array) and returns
    the bounding boxes that survive non-maxima suppression, as rows of
    (xA, yA, xB, yB).
    '''
    clone = image.copy()

    # Sliding-window HOG detection; a small winStride is more accurate but slower
    (rects, _) = HOGCV.detectMultiScale(
        image, winStride=(2, 2), padding=(8, 8), scale=1.05)

    # Draw the original (pre-suppression) bounding boxes on the clone, for debugging
    for (x, y, w, h) in rects:
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # Apply non-maxima suppression from the imutils package to discard
    # overlapping boxes
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    result = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    return result


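# Illustrative only: detector() can be called directly on any frame. The path
# 'people.jpg' below is a hypothetical example, not a file shipped with this repo.
#
#   frame = cv2.imread('people.jpg')
#   boxes = detector(imutils.resize(frame, width=min(400, frame.shape[1])))
#   print("{} people detected.".format(len(boxes)))

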
def args_parser():
    ''' Images, videos, a remote stream or a local camera feed are allowed;
    --verbose enables extra debugging output. '''
    # Needed so that the assignment below updates the module-level flag
    global VERBOSITY

    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", default=None,
                    help="path to the image test file")
    ap.add_argument("-c", "--camera", action="store_true", default=False,
                    help="set this flag to use the camera")
    ap.add_argument("-v", "--video", default=None,
                    help="path to the video file")
    ap.add_argument("-r", "--remote", action="store_true", default=False,
                    help="video comes from a remote source via imagezmq")
    ap.add_argument("--verbose", action="store_true", default=False,
                    help="increase output verbosity")
    args = vars(ap.parse_args())

    if args["verbose"]:
        VERBOSITY = True

    return args


def usage():
    print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v VIDEO] [-r] [--verbose]")
    print()
    print("optional arguments:")
    print("  -h, --help            show this help message and exit")
    print("  -i IMAGE, --image IMAGE")
    print("                        path to the image test file")
    print("  -c, --camera          set this flag to use the camera")
    print("  -v VIDEO, --video VIDEO")
    print("                        path to the video file")
    print("  -r, --remote          video comes from a remote source via imagezmq")
    print("  --verbose             increase output verbosity")


def localDetect(image_path):
    result = []
    image = cv2.imread(image_path)
    if image is None:
        print("[ERROR] could not read local image")
        return (result, image)

    image = imutils.resize(image, width=min(400, image.shape[1]))
    print("[INFO] Detecting people")
    result = detector(image)

    if VERBOSITY:
        # Show the result
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

        cv2.imshow("result", image)
        cv2.waitKey(0)
        cv2.destroyWindow("result")

    return (result, image)


def videoDetect(cap):
    while True:
        # Capture frame-by-frame
        _, frame = cap.read()

        if frame is None:
            break

        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
        result = detector(frame.copy())

        # Draw the surviving bounding boxes on the frame
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        if VERBOSITY:
            cv2.imshow('frame', frame)
            cv2.waitKey(0)

        # result is a numpy array, so test its length rather than its truth value
        if len(result) > 0:
            print("{} people detected.".format(len(result)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()


def remoteDetect(image_hub):
    while True:
        # Receive a frame from the sender (e.g. a Raspberry Pi) and acknowledge it,
        # following imagezmq's request/reply pattern
        rpi_name, frame = image_hub.recv_image()
        image_hub.send_reply(b'OK')

        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
        result = detector(frame.copy())

        # Draw the surviving bounding boxes on the frame
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        cv2.imshow('frame', frame)

        if len(result) > 0:
            print("{} people detected.".format(len(result)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break


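# A minimal sketch of the sender side for the --remote mode (not part of this
# script). It assumes the imagezmq sender API (ImageSender / send_image) from the
# same video_stream package and a receiver reachable at RECEIVER_IP; adjust to
# your setup. Something along these lines could run on the Raspberry Pi:
#
#   import cv2
#   from video_stream import imagezmq
#
#   sender = imagezmq.ImageSender(connect_to='tcp://RECEIVER_IP:5555')
#   cap = cv2.VideoCapture(0)
#   while True:
#       ok, frame = cap.read()
#       if not ok:
#           break
#       sender.send_image('rpi-camera', frame)  # paired with recv_image()/send_reply() above

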
def detectPeople(args):
    image_path = args["image"]
    video_path = args["video"]
    camera = bool(args["camera"])
    remote = bool(args["remote"])

    # Routine to read a local image
    if image_path is not None:
        print("[INFO] Image path provided, attempting to read image")
        (result, image) = localDetect(image_path)
        print(str(len(result)) + " people detected.")

    # Routine to read a local video file
    elif video_path is not None:
        print("[INFO] Video path provided, reading video")
        cap = cv2.VideoCapture(video_path)
        videoDetect(cap)

    # Routine to read images from the webcam
    elif camera:
        print("[INFO] Reading images from local camera")
        cap = cv2.VideoCapture(0)
        videoDetect(cap)

    # Routine to read images from a remote imagezmq stream
    elif remote:
        print("[INFO] Reading images from remote stream")
        image_hub = imagezmq.ImageHub()
        remoteDetect(image_hub)

    else:
        usage()


def main():
    args = args_parser()
    detectPeople(args)


if __name__ == '__main__':
    main()