
Upload files to 'camera'

master · commit 36bba050e7
Lennart Heimbs, 4 years ago
2 changed files with 84 additions and 15 deletions
1. camera/image_presence.py: +54 −0
2. camera/video_presence.py: +30 −15

camera/image_presence.py (+54 −0)

#!/usr/bin/env python

# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True, help="path to images directory")
args = vars(ap.parse_args())

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# loop over the image paths
for imagePath in paths.list_images(args["images"]):
    # load the image and resize it to (1) reduce detection time
    # and (2) improve detection accuracy
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=min(400, image.shape[1]))
    orig = image.copy()

    # detect people in the image
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
        padding=(8, 8), scale=1.05)

    # draw the original bounding boxes
    for (x, y, w, h) in rects:
        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # apply non-maxima suppression to the bounding boxes using a
    # fairly large overlap threshold to try to maintain overlapping
    # boxes that are still people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    # draw the final bounding boxes
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

    # show some information on the number of bounding boxes
    filename = imagePath[imagePath.rfind("/") + 1:]
    print("[INFO] {}: {} original boxes, {} after suppression".format(
        filename, len(rects), len(pick)))

    # show the output images
    #cv2.imshow("Before NMS", orig)
    #cv2.imshow("After NMS", image)
    #cv2.waitKey(0)
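A quick way to exercise this script is to point it at a directory of test images (the path below is only an example); uncommenting the cv2.imshow/cv2.waitKey lines at the end displays the boxes before and after non-maxima suppression:

python image_presence.py --images path/to/images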

camera/video_presence.py (+30 −15)

#!/usr/bin/env python

 from imutils.video import VideoStream
 from imutils.video import FPS
 import argparse
 now = ''
 framecounter = 0
 trackeron = 0
+people_count_total = 0

 while True:
+    people_count_per_frame = 0
     frame = vs.read()
     frame = frame if args.get("video", None) is None else frame[1]
     # if the frame can not be grabbed, then we have reached the end of the video

     # dilate the thresholded image to fill in holes, then find contours on thresholded image
     thresh = cv2.dilate(thresh, None, iterations=2)
-    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
+    thresh = np.uint8(thresh)
+    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    #cnts = cnts if imutils.is_cv2() else im2
+    #print(len(cnts))
+    #if len(cnts) > 1:
+    #    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
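The commented-out lines in this hunk work around the changed return signature of cv2.findContours: OpenCV 2 and 4 return (contours, hierarchy), while OpenCV 3 returns (image, contours, hierarchy). A version-agnostic sketch using the helper imutils ships for exactly this, assuming `thresh` is the dilated binary image from above:

import cv2
import imutils

# works on OpenCV 2, 3 and 4: grab_contours picks the right tuple element
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)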


     # loop over the contours identified
     contourcount = 0
     for c in cnts:
         contourcount = contourcount + 1

         # if the contour is too small, ignore it
         if cv2.contourArea(c) < args["min_area"]:
             continue

         (x, y, w, h) = cv2.boundingRect(c)
         initBB2 = (x, y, w, h)

-        prott1 = r'ML-Models\MobileNetSSD_deploy.prototxt'
-        prott2 = r'ML-Models\MobileNetSSD_deploy.caffemodel'
+        prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
+        prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
         net = cv2.dnn.readNetFromCaffe(prott1, prott2)
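Note that readNetFromCaffe is called inside the contour loop, so the model is reloaded for every contour of every frame; hoisting the load out of the loop is the usual pattern. A minimal sketch of loading once and producing the `detections` array used below, assuming the standard MobileNetSSD preprocessing constants (300x300 input, scale 1/127.5, mean 127.5):

import cv2

# load the model once, before the frame/contour loops (paths as in the hunk above)
net = cv2.dnn.readNetFromCaffe('ML-Models/MobileNetSSD_deploy.prototxt',
                               'ML-Models/MobileNetSSD_deploy.caffemodel')

def detect_people(frame):
    # convert the frame to a 300x300 blob and run a forward pass
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    # result has shape (1, 1, N, 7):
    # [_, class_id, confidence, startX, startY, endX, endY] per detection
    return net.forward()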


CLASSES = ["person"] CLASSES = ["person"]
for i in np.arange(0, detections.shape[2]): for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2] confidence = detections[0, 0, i, 2]


confidence_level = 0.7
confidence_level = 0.8


if confidence > confidence_level: if confidence > confidence_level:
people_count_per_frame+=1
people_count_total+=1
                 # extract the index of the class label from the `detections`, then compute
                 # the (x, y)-coordinates of the bounding box for the object
                 idx = int(detections[0, 0, i, 1])
                 box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                 (startX, startY, endX, endY) = box.astype("int")
                 # draw the prediction on the frame
-                label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
-                cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
+                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
+                label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
+                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
+                cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)
                 y = startY - 15 if startY - 15 > 15 else startY + 15
-                cv2.putText(frame, label, (startX, y),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+                cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
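The switch from CLASSES[idx]/COLORS[idx] to CLASSES[0]/COLORS[0] is needed because `idx` indexes MobileNetSSD's full 21-entry label list, while CLASSES was cut down to ["person"] above; indexing with `idx` would go out of range. An alternative sketch that keeps the per-class index and simply skips non-person detections; the value 15 is the position of "person" in the standard MobileNetSSD ordering and is an assumption here:

PERSON_IDX = 15  # "person" in the standard MobileNetSSD class list (assumed)

idx = int(detections[0, 0, i, 1])
if idx != PERSON_IDX:
    continue  # ignore every class other than "person"
label = "person: {:.2f}%".format(confidence * 100)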


         cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
         # Start tracker

     # check to see if we are currently tracking an object, if so, ignore other boxes
-    # this code is relevant if we want to identify particular persons (section 2 of this tutorial)
+    # this code is relevant if we want to identify particular persons
     if initBB2 is not None:

         # grab the new bounding box coordinates of the object
         info = [
             ("Success", "Yes" if success else "No"),
             ("FPS", "{:.2f}".format(fps.fps())),
+            ("People Frame", "{}".format(people_count_per_frame)),
+            ("People Total", "{}".format(people_count_total))
         ]


         # loop over the info tuples and draw them on our frame
         # draw the text and timestamp on the frame
         now2 = datetime.now()
         time_passed_seconds = str((now2 - now).seconds)
-        cv2.putText(frame, 'Detecting persons', (10, 20),
-            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+        cv2.putText(frame, 'Detecting persons', (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
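The body of the info-tuple loop is not part of this hunk; a typical implementation, assuming `H` holds the frame height (e.g. from `(H, W) = frame.shape[:2]` earlier in the loop), would be:

for (i, (k, v)) in enumerate(info):
    text = "{}: {}".format(k, v)
    # stack the entries bottom-up in the lower-left corner of the frame
    cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)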


     # show the frame and record if the user presses a key
     cv2.imshow("Video stream", frame)
