
Add visual debugging to video_pres and video capabilities to counter

master
Lennart Heimbs, 5 years ago
commit d2aa68c5dd

2 changed files with 74 additions and 66 deletions:
  1. camera/counter_people.py (+33 -41)
  2. camera/video_presence.py (+41 -25)

camera/counter_people.py (+33 -41)

  import numpy as np
  import imutils
  import cv2
- import requests
  import time
  import argparse
  import time


      clone = image.copy()


-     (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4),
-                                               padding=(8, 8), scale=1.05)
+     (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)


      # draw the original bounding boxes
      for (x, y, w, h) in rects:


  def argsParser():
      ap = argparse.ArgumentParser()
-     ap.add_argument("-i", "--image", default=None,
-                     help="path to image test file directory")
-     ap.add_argument("-c", "--camera", default=False,
-                     help="Set as true if you wish to use the camera")
+     ap.add_argument("-i", "--image", default=None, help="path to image test file directory")
+     ap.add_argument("-c", "--camera", default=False, help="Set as true if you wish to use the camera")
+     ap.add_argument("-v", "--video", default=None, help="path to the video file")
      args = vars(ap.parse_args())

      return args
      return result#(result, image)




- def cameraDetect(token, device, variable, sample_time=5):
+ def cameraDetect(video_path="", sample_time=5):

-     cap = cv2.VideoCapture(0)
-     init = time.time()
-     # Allowed sample time for Ubidots is 1 dot/second
-     if sample_time < 1:
-         sample_time = 1
+     if video_path:
+         cap = cv2.VideoCapture(video_path)
+     else:
+         cap = cv2.VideoCapture(0)
+     #init = time.time()


      while(True):
          # Capture frame-by-frame
-         ret, frame = cap.read()
+         _, frame = cap.read()
+
+         if frame is None:
+             break

          frame = imutils.resize(frame, width=min(400, frame.shape[1]))
          result = detector(frame.copy())


          # shows the result
-         #for (xA, yA, xB, yB) in result:
-         #    cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
-         #cv2.imshow('frame', frame)
-
-         # Sends results
-         if time.time() - init >= sample_time:
-             #print("[INFO] Sending actual frame results")
-             # Converts the image to base 64 and adds it to the context
-             #b64 = convert_to_base64(frame)
-             #context = {"image": b64}
-             if len(result):
-                 print("{} people detected.".format(len(result)))
-             init = time.time()
+         for (xA, yA, xB, yB) in result:
+             cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
+         cv2.imshow('frame', frame)
+         cv2.waitKey(0)
+
+         #if time.time() - init >= sample_time:
+         if len(result):
+             print("{} people detected.".format(len(result)))
+         #init = time.time()


          if cv2.waitKey(1) & 0xFF == ord('q'):


  def detectPeople(args):
      image_path = args["image"]
+     video_path = args["video"]
      camera = True if str(args["camera"]) == 'true' else False


      # Routine to read local image
-     if image_path != None and not camera:
+     if image_path != None and not camera and video_path == None:
          print("[INFO] Image path provided, attempting to read image")
          (result, image) = localDetect(image_path)
-         print("[INFO] sending results")
-         # Converts the image to base 64 and adds it to the context
-         b64 = convert_to_base64(image)
-         context = {"image": b64}
-         # Sends the result
+         print(len(result))
+         """req = sendToUbidots(TOKEN, DEVICE, VARIABLE,
+                                len(result), context=context)
+         if req.status_code >= 400:
+             print("[ERROR] Could not send data to Ubidots")
+         return req"""
+         print(str(len(result)) + " People detected.")

+     if video_path != None and not camera:
+         print("[INFO] reading video")
+         cameraDetect(video_path)


      # Routine to read images from webcam
      if camera:
          print("[INFO] reading camera images")
-         cameraDetect(TOKEN, DEVICE, VARIABLE)
+         cameraDetect()




  def main():
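
With the new -v/--video option, counter_people.py can run detection on a recorded file in addition to a still image or the live camera. A minimal sketch of how the option is dispatched, reusing argsParser and cameraDetect from the diff above (demo.mp4 is a placeholder path):

    # e.g. invoked as: python counter_people.py -v demo.mp4
    args = argsParser()
    camera = str(args["camera"]) == 'true'
    if args["video"] is not None and not camera:
        cameraDetect(args["video"])  # frames come from the file instead of device 0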

camera/video_presence.py (+41 -25)

  import numpy as np
  import time as time2


+ VISUAL_DEBUG=True

""" Arguments """ """ Arguments """
ap = argparse.ArgumentParser() ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file") ap.add_argument("-v", "--video", help="path to the video file")
""" Determine opencv version and select tracker """ """ Determine opencv version and select tracker """
# extract the OpenCV version info # extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2] (major, minor) = cv2.__version__.split(".")[:2]
# different methods of opencv require differing ways to unpack find countours
if int(major) > 3:
OPENCV4=True
else:
OPENCV4=False

  # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
  # function to create the entity that tracks objects
  if int(major) == 3 and int(minor) < 3:
  # otherwise, we are reading from a video file
  else:
      vs = cv2.VideoCapture(args["video"])
+     #vs.set(cv2.CAP_PROP_FPS, 2)


"""" Analyzing video frames """ """" Analyzing video frames """
# loop over the frames of the video, and store corresponding information from each frame # loop over the frames of the video, and store corresponding information from each frame
framecounter = 0 framecounter = 0
trackeron = 0 trackeron = 0
people_count_total = 0 people_count_total = 0
frame_counter= 0


- while True:
+ cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
+ if VISUAL_DEBUG:
+     cv2.namedWindow('debug image', cv2.WINDOW_NORMAL)
+
+ while True:
+     """frame_counter+=1
+     if framecounter%5 != 0:
+         continue"""
+     if VISUAL_DEBUG:
+         print("Frame {}".format(framecounter))
      people_count_per_frame = 0
      frame = vs.read()
      frame = frame if args.get("video", None) is None else frame[1]
      # compute the absolute difference between the current frame and first frame
      frameDelta = cv2.absdiff(firstFrame, gray)
      thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

-     # dilate the thresholded image to fill in holes, then find contours on thresholded image
+     #debug
+     if VISUAL_DEBUG:
+         cv2.imshow("debug image", thresh)
+         cv2.waitKey(0)
+         #cv2.destroyWindow("threshhold image")
+     # dilate the thresholded image to fill in holes
      thresh = cv2.dilate(thresh, None, iterations=2)

+     # find contours on thresholded image
      thresh = np.uint8(thresh)
-     cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
-     #cnts = cnts if imutils.is_cv2() else im2
-     #print(len(cnts))
-     #if len(cnts) > 1:
-     #cnts = cnts[0] if imutils.is_cv2() else cnts[1]
+     if OPENCV4:
+         cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+     else:
+         _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+     if VISUAL_DEBUG:
+         """img = cv2.drawContours(thresh.copy(), cnts, -1, (0,255,0), 3)
+         cv2.imshow("debug image", img)
+         cv2.waitKey(0)"""
+         print(len(cnts))


      # loop over the contours identified
      contourcount = 0
          trackbox = frame[y:y+h, x:x+w]
          trackbox = cv2.resize(trackbox, (224, 224))
          #cv2.imshow('image',trackbox)

"""if VISUAL_DEBUG:
trackbox2 = thresh[y:y+h, x:x+w]
trackbox2 = cv2.resize(trackbox2, (224, 224))
cv2.imshow('debug image',trackbox2)
cv2.waitKey(0)"""
          blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
          net.setInput(blob)
          detections = net.forward()
          for i in np.arange(0, detections.shape[2]):
              confidence = detections[0, 0, i, 2]


-             confidence_level = 0.8
+             confidence_level = 0.95


              if confidence > confidence_level:
                  people_count_per_frame+=1
                  #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                  cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)


+                 if VISUAL_DEBUG:
+                     print("person found")
+                     cv2.imshow("debug image", frame)
+                     key = cv2.waitKey(0)

          cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
          # Start tracker
          now = datetime.now()


      # check to see if we are currently tracking an object, if so, ignore other boxes
      # this code is relevant if we want to identify particular persons
-     if initBB2 is not None:
+     """if initBB2 is not None:


          # grab the new bounding box coordinates of the object
          (success, box) = tracker.update(frame)
          # draw the text and timestamp on the frame
          now2 = datetime.now()
          time_passed_seconds = str((now2-now).seconds)
-         cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+         cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)"""


      # show the frame and record if the user presses a key
      cv2.imshow("Video stream", frame)
