Upload files to 'camera'

parent 57f8e55d2a
commit 36bba050e7

camera/image_presence.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# import the necessary packages
+from __future__ import print_function
+from imutils.object_detection import non_max_suppression
+from imutils import paths
+import numpy as np
+import argparse
+import imutils
+import cv2
+
+# construct the argument parse and parse the arguments
+ap = argparse.ArgumentParser()
+ap.add_argument("-i", "--images", required=True, help="path to images directory")
+args = vars(ap.parse_args())
+
+# initialize the HOG descriptor/person detector
+hog = cv2.HOGDescriptor()
+hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
+
+# loop over the image paths
+for imagePath in paths.list_images(args["images"]):
+    # load the image and resize it to (1) reduce detection time
+    # and (2) improve detection accuracy
+    image = cv2.imread(imagePath)
+    image = imutils.resize(image, width=min(400, image.shape[1]))
+    orig = image.copy()
+
+    # detect people in the image
+    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
+        padding=(8, 8), scale=1.05)
+
+    # draw the original bounding boxes
+    for (x, y, w, h) in rects:
+        cv2.rectangle(orig, (x, y), (x + w, y + h), (0, 0, 255), 2)
+
+    # apply non-maxima suppression to the bounding boxes using a
+    # fairly large overlap threshold to try to maintain overlapping
+    # boxes that are still people
+    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
+    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
+
+    # draw the final bounding boxes
+    for (xA, yA, xB, yB) in pick:
+        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
+
+    # show some information on the number of bounding boxes
+    filename = imagePath[imagePath.rfind("/") + 1:]
+    print("[INFO] {}: {} original boxes, {} after suppression".format(filename, len(rects), len(pick)))
+
+    # show the output images
+    #cv2.imshow("Before NMS", orig)
+    #cv2.imshow("After NMS", image)
+    #cv2.waitKey(0)
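Reviewer note: image_presence.py is the standard HOG + SVM pedestrian-detection recipe, driven from the command line (e.g. python camera/image_presence.py --images path/to/images). The only non-obvious step is the non-maxima suppression pass, which merges the overlapping boxes that detectMultiScale emits around a single person. A minimal, self-contained sketch of that step alone, using toy coordinates that are not from this commit:

import numpy as np
from imutils.object_detection import non_max_suppression

# toy (startX, startY, endX, endY) boxes: three stacked on one person, one separate
boxes = np.array([
    [12, 30, 76, 150],
    [15, 28, 80, 155],
    [10, 34, 72, 148],
    [200, 40, 260, 160],
])

# overlapThresh=0.65 matches the script; the three overlapping boxes collapse into one
pick = non_max_suppression(boxes, probs=None, overlapThresh=0.65)
print(len(pick))  # 2

The remaining hunks modify the commit's second file, the live video-stream person counter.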
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 from imutils.video import VideoStream
 from imutils.video import FPS
 import argparse
@@ -58,8 +60,10 @@ differ = None
 now = ''
 framecounter = 0
 trackeron = 0
+people_count_total = 0

 while True:
+    people_count_per_frame = 0
     frame = vs.read()
     frame = frame if args.get("video", None) is None else frame[1]
     # if the frame can not be grabbed, then we have reached the end of the video
@@ -87,8 +91,12 @@ while True:

     # dilate the thresholded image to fill in holes, then find contours on thresholded image
     thresh = cv2.dilate(thresh, None, iterations=2)
-    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
-    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
+    thresh = np.uint8(thresh)
+    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+    #cnts = cnts if imutils.is_cv2() else im2
+    #print(len(cnts))
+    #if len(cnts) > 1:
+    #cnts = cnts[0] if imutils.is_cv2() else cnts[1]

     # loop over the contours identified
     contourcount = 0
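Reviewer note on the findContours change: cv2.findContours returns a 3-tuple (image, contours, hierarchy) in OpenCV 3.x but a 2-tuple (contours, hierarchy) in OpenCV 2.4 and 4.x, so the new two-value unpacking assumes OpenCV 4 (and the name im2 actually receives the hierarchy there, not an image). A version-agnostic sketch, not part of this commit, using imutils.grab_contours:

import cv2
import imutils
import numpy as np

# synthetic binary image with a single white blob
thresh = np.zeros((120, 160), dtype=np.uint8)
cv2.rectangle(thresh, (30, 30), (90, 90), 255, -1)

# grab_contours normalizes the 2-tuple vs 3-tuple return across OpenCV versions
cnts = imutils.grab_contours(
    cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
print(len(cnts))  # 1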
@@ -103,8 +111,8 @@ while True:
         (x, y, w, h) = cv2.boundingRect(c)
         initBB2 =(x,y,w,h)

-        prott1 = r'ML-Models\MobileNetSSD_deploy.prototxt'
-        prott2 = r'ML-Models\MobileNetSSD_deploy.caffemodel'
+        prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
+        prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
         net = cv2.dnn.readNetFromCaffe(prott1, prott2)

         CLASSES = ["person"]
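Reviewer note on the path fix: forward slashes resolve on both Windows and POSIX, so dropping the backslash raw strings makes the model load portable. Separately, readNetFromCaffe sits inside the per-contour loop in this hunk, which re-reads both model files on every contour. A sketch of hoisting the load out of the loop, assuming the model files exist at these paths and nothing else in the script mutates net:

import cv2

# expensive: parse the prototxt and weights once, at startup
prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
net = cv2.dnn.readNetFromCaffe(prott1, prott2)

# inside the frame loop only the forward pass would run, e.g.:
# blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
# net.setInput(blob)
# detections = net.forward()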
@@ -120,22 +128,28 @@ while True:
         for i in np.arange(0, detections.shape[2]):
             confidence = detections[0, 0, i, 2]

-            confidence_level = 0.7
+            confidence_level = 0.8

             if confidence > confidence_level:
+                people_count_per_frame+=1
+                people_count_total+=1
                 # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
                 # the bounding box for the object
                 idx = int(detections[0, 0, i, 1])
                 box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                 (startX, startY, endX, endY) = box.astype("int")
                 # draw the prediction on the frame
-                label = "{}: {:.2f}%".format(CLASSES[idx],
-                    confidence * 100)
-                cv2.rectangle(frame, (startX, startY), (endX, endY),
-                    COLORS[idx], 2)
+                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
+                label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
+                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
+                cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)

                 y = startY - 15 if startY - 15 > 15 else startY + 15
-                cv2.putText(frame, label, (startX, y),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+                cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)

         cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
         # Start tracker
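Reviewer note on CLASSES[idx] -> CLASSES[0]: MobileNet-SSD emits class ids over its 21-entry VOC label set, but this script's CLASSES holds only "person", so CLASSES[idx] raises IndexError for any id other than 0; pinning the label and colour to index 0 avoids the crash. It also means every detection above confidence_level is labelled and counted as a person, whatever class the network actually reported. A sketch of class-filtered counting, assuming the stock MobileNetSSD_deploy VOC ordering in which "person" is id 15:

import numpy as np

PERSON_CLASS_ID = 15  # assumption: VOC label order of MobileNetSSD_deploy

def count_people(detections, confidence_level=0.8):
    # SSD output blob has shape (1, 1, N, 7):
    # [image_id, class_id, confidence, then four normalized box corners]
    count = 0
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        idx = int(detections[0, 0, i, 1])
        # count only confident *person* detections, not every confident detection
        if confidence > confidence_level and idx == PERSON_CLASS_ID:
            count += 1
    return count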
@@ -146,7 +160,7 @@ while True:


     # check to see if we are currently tracking an object, if so, ignore other boxes
-    # this code is relevant if we want to identify particular persons (section 2 of this tutorial)
+    # this code is relevant if we want to identify particular persons
     if initBB2 is not None:

         # grab the new bounding box coordinates of the object
@@ -173,6 +187,8 @@ while True:
     info = [
         ("Success", "Yes" if success else "No"),
         ("FPS", "{:.2f}".format(fps.fps())),
+        ("People Frame", "{}".format(people_count_per_frame)),
+        ("People Total", "{}".format(people_count_total))
     ]

     # loop over the info tuples and draw them on our frame
@@ -183,8 +199,7 @@ while True:
     # draw the text and timestamp on the frame
     now2 = datetime.now()
     time_passed_seconds = str((now2-now).seconds)
-    cv2.putText(frame, 'Detecting persons',(10, 20),
-        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+    cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

     # show the frame and record if the user presses a key
     cv2.imshow("Video stream", frame)