|
|
|
|
|
|
|
|
import argparse
from datetime import datetime

import cv2
import numpy as np

import time as time2

# imutils is referenced by commented-out code below; its VideoStream is used
# for the reconstructed webcam branch
from imutils.video import VideoStream


# Toggle to show intermediate images and per-frame diagnostics
VISUAL_DEBUG = True

# NOTE: 'COLORS' is referenced later but never defined in this section;
# a single assumed colour is provided so the drawing code runs
COLORS = [(0, 255, 0)]

""" Arguments """ |
|
|
""" Arguments """ |
|
|
ap = argparse.ArgumentParser() |
|
|
ap = argparse.ArgumentParser() |
|
|
ap.add_argument("-v", "--video", help="path to the video file") |
|
|
ap.add_argument("-v", "--video", help="path to the video file") |
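# Usage sketch (assumed invocation; the script name is hypothetical):
#   python people_counter.py --video path/to/video.mp4
# With no --video argument, the webcam is used instead.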
|
|
|
|
|
|
|
|
""" Determine opencv version and select tracker """ |
|
|
""" Determine opencv version and select tracker """ |
|
|
# extract the OpenCV version info |
|
|
# extract the OpenCV version info |
|
|
(major, minor) = cv2.__version__.split(".")[:2] |
|
|
(major, minor) = cv2.__version__.split(".")[:2] |
|
|
# different methods of opencv require differing ways to unpack find countours |
|
|
|
|
|
if int(major) > 3: |
|
|
|
|
|
OPENCV4=True |
|
|
|
|
|
else: |
|
|
|
|
|
OPENCV4=False |
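# For reference: cv2.findContours returns (contours, hierarchy) in
# OpenCV 2.x and 4.x, but (image, contours, hierarchy) in OpenCV 3.x,
# which is why the flag above is needed when unpacking below.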
|
|
|
|
|
|
|
|
|
|
|
# if we are using OpenCV 3.2 or an earlier version, we can use a special factory
# function to create the entity that tracks objects
if int(major) == 3 and int(minor) < 3:
    # NOTE: the body of this branch was missing; cv2.Tracker_create is the
    # factory available in OpenCV <= 3.2, and "KCF" is an assumed choice
    tracker = cv2.Tracker_create("KCF")

# if no video path was supplied, read from the webcam (reconstructed branch;
# the frame unpacking in the main loop expects an imutils VideoStream here)
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
    #vs.set(cv2.CAP_PROP_FPS, 2)

"""" Analyzing video frames """ |
|
|
"""" Analyzing video frames """ |
|
|
# loop over the frames of the video, and store corresponding information from each frame |
|
|
# loop over the frames of the video, and store corresponding information from each frame |
|
|
|
|
|
|
|
|
framecounter = 0 |
|
|
framecounter = 0 |
|
|
trackeron = 0 |
|
|
trackeron = 0 |
|
|
people_count_total = 0 |
|
|
people_count_total = 0 |
|
|
frame_counter= 0 |
|
|
|
|
|
|
|
|
|
|
|
# create the display windows once, before the main loop
cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
if VISUAL_DEBUG:
    cv2.namedWindow('debug image', cv2.WINDOW_NORMAL)

while True:
    # count frames so the diagnostics and optional frame skipping work
    framecounter += 1

    # optionally skip frames to speed up processing (disabled)
    """if framecounter % 5 != 0:
        continue"""

    if VISUAL_DEBUG:
        print("Frame {}".format(framecounter))

    people_count_per_frame = 0

    # grab the current frame; VideoStream returns the frame directly,
    # while VideoCapture returns a (grabbed, frame) tuple
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]

    # if no frame could be read, we have reached the end of the video
    if frame is None:
        break

    # reconstructed step: the absolute-difference code below needs a blurred
    # grayscale frame and a reference 'firstFrame' to compare against
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
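    # Frame differencing: any pixel that differs from the reference frame by
    # more than 25 grey levels becomes white (255), everything else black,
    # giving a binary motion mask for the contour search below.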
|
|
|
|
|
|
|
|
    # debug: show the raw motion mask
    if VISUAL_DEBUG:
        cv2.imshow("debug image", thresh)
        cv2.waitKey(0)
        #cv2.destroyWindow("threshold image")

    # dilate the thresholded image to fill in holes
    thresh = cv2.dilate(thresh, None, iterations=2)
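    # With kernel=None, cv2.dilate uses a 3x3 rectangular structuring element;
    # two iterations merge nearby blobs so one moving person tends to produce
    # a single contour rather than several fragments.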
|
|
|
|
|
|
|
|
|
|
|
    # find contours on thresholded image
    thresh = np.uint8(thresh)

    if OPENCV4:
        cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    #cnts = cnts if imutils.is_cv2() else im2
    #print(len(cnts))
    #if len(cnts) > 1:
    #cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    if VISUAL_DEBUG:
        """img = cv2.drawContours(thresh.copy(), cnts, -1, (0,255,0), 3)
        cv2.imshow("debug image", img)
        cv2.waitKey(0)"""

    print(len(cnts))

    # loop over the contours identified
    contourcount = 0
    for c in cnts:
        contourcount += 1

        # reconstructed bounding-box step: the crop below needs x, y, w, h,
        # and a minimum-area filter (500 px is an assumed value) skips noise
        if cv2.contourArea(c) < 500:
            continue
        (x, y, w, h) = cv2.boundingRect(c)

        # crop the region of motion and resize it for the detector
        trackbox = frame[y:y+h, x:x+w]
        trackbox = cv2.resize(trackbox, (224, 224))
        #cv2.imshow('image',trackbox)

"""if VISUAL_DEBUG: |
|
|
|
|
|
trackbox2 = thresh[y:y+h, x:x+w] |
|
|
|
|
|
trackbox2 = cv2.resize(trackbox2, (224, 224)) |
|
|
|
|
|
cv2.imshow('debug image',trackbox2) |
|
|
|
|
|
cv2.waitKey(0)""" |
|
|
|
|
|
|
|
|
        # NOTE: 'net' is not created in this section; the preprocessing below
        # (scale 0.007843 = 1/127.5, 300x300 input, mean 127.5) matches the
        # MobileNet-SSD Caffe model, e.g.
        # net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
        blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)), 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()
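        # Assuming an SSD-style detector, the output is a 4-D blob shaped
        # [1, 1, N, 7]; each of the N rows holds
        # [batchId, classId, confidence, left, top, right, bottom],
        # which is why the confidence is read from index 2 below.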
|
|
|
|
|
|
|
|
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]

            confidence_level = 0.95

            if confidence > confidence_level:
                people_count_per_frame += 1

                # NOTE: 'label' and 'startX' were never defined in this section;
                # an assumed label and the contour's x coordinate are used instead
                label = "Person: {:.2f}%".format(confidence * 100)
                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                cv2.putText(frame, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)

                if VISUAL_DEBUG:
                    print("person found")
                    cv2.imshow("debug image", frame)
                    key = cv2.waitKey(0)

                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)

                # Start tracker
                now = datetime.now()

    # check to see if we are currently tracking an object, if so, ignore other boxes
    # this code is relevant if we want to identify particular persons
    if initBB2 is not None:
        """Disabled tracking logic:

        # grab the new bounding box coordinates of the object
        (success, box) = tracker.update(frame)

        # draw the text and timestamp on the frame
        now2 = datetime.now()
        time_passed_seconds = str((now2 - now).seconds)
        cv2.putText(frame, 'Detecting persons', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        """

    # show the frame and record if the user presses a key
    cv2.imshow("Video stream", frame)