
Add visual debugging to video_pres and video capabilities to counter

master
Lennart Heimbs committed 4 years ago
commit d2aa68c5dd
2 changed files with 74 additions and 66 deletions:
  1. camera/counter_people.py (+33 -41)
  2. camera/video_presence.py (+41 -25)
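
With this commit, counter_people.py gains a --video flag alongside the existing --image and --camera options (see the argsParser hunk below). A minimal usage sketch; the clip path is a hypothetical example:

    # Count people in a recorded video (hypothetical path)
    python camera/counter_people.py --video path/to/clip.mp4

    # Webcam mode, unchanged
    python camera/counter_people.py --camera true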

camera/counter_people.py (+33 -41)

@@ -2,7 +2,6 @@ from imutils.object_detection import non_max_suppression
 import numpy as np
 import imutils
 import cv2
-import requests
 import time
 import argparse
 import time
@@ -25,8 +24,7 @@ def detector(image):
 
     clone = image.copy()
 
-    (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4),
-                                              padding=(8, 8), scale=1.05)
+    (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
 
     # draw the original bounding boxes
     for (x, y, w, h) in rects:
@@ -46,10 +44,9 @@ def buildPayload(variable, value, context):
 
 def argsParser():
     ap = argparse.ArgumentParser()
-    ap.add_argument("-i", "--image", default=None,
-                    help="path to image test file directory")
-    ap.add_argument("-c", "--camera", default=False,
-                    help="Set as true if you wish to use the camera")
+    ap.add_argument("-i", "--image", default=None, help="path to image test file directory")
+    ap.add_argument("-c", "--camera", default=False, help="Set as true if you wish to use the camera")
+    ap.add_argument("-v", "--video", default=None, help="path to the video file")
     args = vars(ap.parse_args())
 
     return args
@@ -78,35 +75,35 @@ def localDetect(image_path):
     return result#(result, image)
 
 
-def cameraDetect(token, device, variable, sample_time=5):
+def cameraDetect(video_path="", sample_time=5):
 
-    cap = cv2.VideoCapture(0)
-    init = time.time()
-    # Allowed sample time for Ubidots is 1 dot/second
-    if sample_time < 1:
-        sample_time = 1
+    if video_path:
+        cap = cv2.VideoCapture(video_path)
+    else:
+        cap = cv2.VideoCapture(0)
+    #init = time.time()
 
     while(True):
         # Capture frame-by-frame
-        ret, frame = cap.read()
+        _, frame = cap.read()
+
+        if frame is None:
+            break
 
         frame = imutils.resize(frame, width=min(400, frame.shape[1]))
         result = detector(frame.copy())
 
-        # shows the result
-        #for (xA, yA, xB, yB) in result:
-        #    cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
-        #cv2.imshow('frame', frame)
-
-        # Sends results
-        if time.time() - init >= sample_time:
-            #print("[INFO] Sending actual frame results")
-            # Converts the image to base 64 and adds it to the context
-            #b64 = convert_to_base64(frame)
-            #context = {"image": b64}
-            if len(result):
-                print("{} people detected.".format(len(result)))
-            init = time.time()
+        for (xA, yA, xB, yB) in result:
+            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
+        cv2.imshow('frame', frame)
+        cv2.waitKey(0)
+
+        #if time.time() - init >= sample_time:
+        if len(result):
+            print("{} people detected.".format(len(result)))
+            #init = time.time()
 
         if cv2.waitKey(1) & 0xFF == ord('q'):
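
After this hunk, cameraDetect() opens a video file when video_path is given and falls back to the webcam otherwise, stopping when cap.read() returns no frame. A self-contained sketch of that capture setup; the helper name open_capture and the isOpened() guard are additions of this sketch, not part of the diff:

    import cv2

    def open_capture(video_path=""):
        # A recorded video when a path is given, else device 0 (the webcam)
        cap = cv2.VideoCapture(video_path) if video_path else cv2.VideoCapture(0)
        if not cap.isOpened():
            raise IOError("could not open capture source")
        return cap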
@@ -127,28 +124,23 @@ def convert_to_base64(image):
 
 def detectPeople(args):
     image_path = args["image"]
+    video_path = args["video"]
     camera = True if str(args["camera"]) == 'true' else False
 
     # Routine to read local image
-    if image_path != None and not camera:
+    if image_path != None and not camera and video_path == None:
         print("[INFO] Image path provided, attempting to read image")
        (result, image) = localDetect(image_path)
         print("[INFO] sending results")
         # Converts the image to base 64 and adds it to the context
         b64 = convert_to_base64(image)
         context = {"image": b64}
-        print(len(result))
-        # Sends the result
-        """req = sendToUbidots(TOKEN, DEVICE, VARIABLE,
-                              len(result), context=context)
-        if req.status_code >= 400:
-            print("[ERROR] Could not send data to Ubidots")
-            return req"""
+        print(str(len(result)) + " People detected.")
+
+    if video_path != None and not camera:
+        print("[INFO] reading video")
+        cameraDetect(video_path)
 
     # Routine to read images from webcam
     if camera:
         print("[INFO] reading camera images")
-        cameraDetect(TOKEN, DEVICE, VARIABLE)
+        cameraDetect()
 
 
 def main():
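
For context, the detector() these routines call rests on OpenCV's bundled HOG + linear SVM person detector. A condensed, runnable sketch of that pipeline with the same detectMultiScale parameters the file uses; the overlapThresh value is an assumption from the common imutils example, since the suppression step is not shown in this diff:

    import cv2
    import numpy as np
    from imutils.object_detection import non_max_suppression

    HOGCV = cv2.HOGDescriptor()
    HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

    def detect_people(image):
        # Sliding-window HOG detection, same parameters as counter_people.py
        rects, weights = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
        # Collapse overlapping boxes so one person yields one detection
        boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        return non_max_suppression(boxes, probs=None, overlapThresh=0.65)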

camera/video_presence.py (+41 -25)

@@ -10,6 +10,8 @@ from datetime import datetime, time
 import numpy as np
 import time as time2
 
+VISUAL_DEBUG=True
+
 """ Arguments """
 ap = argparse.ArgumentParser()
 ap.add_argument("-v", "--video", help="path to the video file")
@@ -20,12 +22,6 @@ args = vars(ap.parse_args())
 """ Determine opencv version and select tracker """
 # extract the OpenCV version info
 (major, minor) = cv2.__version__.split(".")[:2]
-# different methods of opencv require differing ways to unpack find countours
-if int(major) > 3:
-    OPENCV4=True
-else:
-    OPENCV4=False
-
 # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
 # function to create the entity that tracks objects
 if int(major) == 3 and int(minor) < 3:
@@ -56,7 +52,6 @@ if args.get("video", None) is None:
 # otherwise, we are reading from a video file
 else:
     vs = cv2.VideoCapture(args["video"])
-    #vs.set(cv2.CAP_PROP_FPS, 2)
 
"""" Analyzing video frames """
# loop over the frames of the video, and store corresponding information from each frame
@@ -68,13 +63,14 @@ now = ''
 framecounter = 0
 trackeron = 0
 people_count_total = 0
-frame_counter= 0
 
-while True:
-    """frame_counter+=1
-    if framecounter%5 != 0:
-        continue"""
+cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
+if VISUAL_DEBUG:
+    cv2.namedWindow('debug image', cv2.WINDOW_NORMAL)
 
+while True:
+    if VISUAL_DEBUG:
+        print("Frame {}".format(framecounter))
     people_count_per_frame = 0
     frame = vs.read()
     frame = frame if args.get("video", None) is None else frame[1]
@@ -100,18 +96,26 @@
     # compute the absolute difference between the current frame and first frame
     frameDelta = cv2.absdiff(firstFrame, gray)
     thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
 
-    # dilate the thresholded image to fill in holes, then find contours on thresholded image
+    #debug
+    if VISUAL_DEBUG:
+        cv2.imshow("debug image", thresh)
+        cv2.waitKey(0)
+        #cv2.destroyWindow("threshhold image")
+    # dilate the thresholded image to fill in holes
     thresh = cv2.dilate(thresh, None, iterations=2)
 
+    # find contours on thresholded image
     thresh = np.uint8(thresh)
-    if OPENCV4:
-        cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
-    else:
-        _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
-    #cnts = cnts if imutils.is_cv2() else im2
-    #print(len(cnts))
-    #if len(cnts) > 1:
-        #cnts = cnts[0] if imutils.is_cv2() else cnts[1]
+    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+    if VISUAL_DEBUG:
+        """img = cv2.drawContours(thresh.copy(), cnts, -1, (0,255,0), 3)
+        cv2.imshow("debug image", img)
+        cv2.waitKey(0)"""
+        print(len(cnts))
 
     # loop over the contours identified
     contourcount = 0
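
The OPENCV4 switch dropped above existed because cv2.findContours returns (image, contours, hierarchy) under OpenCV 3 but (contours, hierarchy) under OpenCV 4; the new unconditional two-value unpack therefore assumes OpenCV 4. A version-agnostic alternative, sketched with imutils.grab_contours (not what the commit does):

    import cv2
    import imutils

    def find_contours(thresh):
        # Works on OpenCV 3 and 4: grab_contours normalizes the return tuple
        res = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        return imutils.grab_contours(res)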
@@ -136,6 +140,13 @@
             trackbox = frame[y:y+h, x:x+w]
             trackbox = cv2.resize(trackbox, (224, 224))
             #cv2.imshow('image',trackbox)
+
+            """if VISUAL_DEBUG:
+                trackbox2 = thresh[y:y+h, x:x+w]
+                trackbox2 = cv2.resize(trackbox2, (224, 224))
+                cv2.imshow('debug image',trackbox2)
+                cv2.waitKey(0)"""
+
             blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
             net.setInput(blob)
             detections = net.forward()
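
For readability, the blobFromImage arguments here are: scalefactor 0.007843 (roughly 1/127.5), target size 300x300, and mean 127.5, i.e. the usual MobileNet-SSD normalization of pixel values to about [-1, 1]. The same call, annotated as a sketch (trackbox is the person crop from the surrounding code):

    import cv2

    def make_blob(trackbox):
        # (pixel - 127.5) * 0.007843 maps [0, 255] to about [-1, 1],
        # the input range the MobileNet-SSD Caffe model expects
        return cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),
                                     0.007843, (300, 300), 127.5)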
@@ -143,7 +154,7 @@
                 for i in np.arange(0, detections.shape[2]):
                     confidence = detections[0, 0, i, 2]
 
-                    confidence_level = 0.8
+                    confidence_level = 0.95
 
                     if confidence > confidence_level:
                         people_count_per_frame+=1
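
Each entry of the detections array from net.forward() holds [image_id, class_id, confidence, x1, y1, x2, y2]; raising confidence_level from 0.8 to 0.95 trades missed detections for fewer false positives. A condensed sketch of the counting filter; the class-15 'person' check is an assumption from the standard MobileNet-SSD VOC label map, while the diff itself counts every detection above threshold:

    import numpy as np

    PERSON_CLASS_ID = 15        # 'person' in the common VOC label map (assumption)
    CONFIDENCE_LEVEL = 0.95     # raised from 0.8 in this commit

    def count_confident_people(detections):
        count = 0
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            class_id = int(detections[0, 0, i, 1])
            if class_id == PERSON_CLASS_ID and confidence > CONFIDENCE_LEVEL:
                count += 1
        return count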
@@ -166,6 +177,11 @@
                         #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                         cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
 
+                        if VISUAL_DEBUG:
+                            print("person found")
+                            cv2.imshow("debug image", frame)
+                            key = cv2.waitKey(0)
+
                         cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
                         # Start tracker
                         now = datetime.now()
@@ -176,7 +192,7 @@
 
     # check to see if we are currently tracking an object, if so, ignore other boxes
     # this code is relevant if we want to identify particular persons
-    if initBB2 is not None:
+    """if initBB2 is not None:
 
         # grab the new bounding box coordinates of the object
         (success, box) = tracker.update(frame)
@@ -214,7 +230,7 @@
         # draw the text and timestamp on the frame
         now2 = datetime.now()
         time_passed_seconds = str((now2-now).seconds)
-        cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+        cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)"""
 
     # show the frame and record if the user presses a key
     cv2.imshow("Video stream", frame)
