diff --git a/.gitignore b/.gitignore
index c29838a..3de49d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,4 @@
 *.pyc
 camera/venv
+camera/images
+camera/videos
diff --git a/camera/.vscode/launch.json b/camera/.vscode/launch.json
new file mode 100644
index 0000000..fa170e0
--- /dev/null
+++ b/camera/.vscode/launch.json
@@ -0,0 +1,21 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal"
+        },
+
+        {
+            "name": "Python: Current File with args",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "args": ["-v", "~/Videos/video.h264"]
+        }
+    ]
+}
\ No newline at end of file
diff --git a/camera/.vscode/settings.json b/camera/.vscode/settings.json
new file mode 100644
index 0000000..5b80df3
--- /dev/null
+++ b/camera/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.pythonPath": "venv/bin/python"
+}
\ No newline at end of file
diff --git a/camera/ML-Models/MobileNetSSD_deploy.caffemodel b/camera/ML-Models/MobileNetSSD_deploy.caffemodel
index 253e501..7104f06 100644
Binary files a/camera/ML-Models/MobileNetSSD_deploy.caffemodel and b/camera/ML-Models/MobileNetSSD_deploy.caffemodel differ
diff --git a/camera/person-detection.py b/camera/image_presence.py
old mode 100644
new mode 100755
similarity index 98%
rename from camera/person-detection.py
rename to camera/image_presence.py
index 7b83323..adb87f8
--- a/camera/person-detection.py
+++ b/camera/image_presence.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 # import the necessary packages
 from __future__ import print_function
 from imutils.object_detection import non_max_suppression
diff --git a/camera/video_presence.py b/camera/video_presence.py
old mode 100644
new mode 100755
index 25c86f3..35d39a4
--- a/camera/video_presence.py
+++ b/camera/video_presence.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 from imutils.video import VideoStream
 from imutils.video import FPS
 import argparse
@@ -87,15 +89,19 @@ while True:
     # dilate the thresholded image to fill in holes, then find contours on thresholded image
     thresh = cv2.dilate(thresh, None, iterations=2)
-    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
-    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
+    thresh = np.uint8(thresh)
+    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+    #cnts = cnts if imutils.is_cv2() else im2
+    #print(len(cnts))
+    #if len(cnts) > 1:
+        #cnts = cnts[0] if imutils.is_cv2() else cnts[1]

     # loop over the contours identified
     contourcount = 0
     for c in cnts:
         contourcount = contourcount + 1

-       # if the contour is too small, ignore it
+        # if the contour is too small, ignore it
         if cv2.contourArea(c) < args["min_area"]:
             continue
@@ -103,8 +109,8 @@ while True:
         (x, y, w, h) = cv2.boundingRect(c)
         initBB2 =(x,y,w,h)

-        prott1 = r'ML-Models\MobileNetSSD_deploy.prototxt'
-        prott2 = r'ML-Models\MobileNetSSD_deploy.caffemodel'
+        prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
+        prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
         net = cv2.dnn.readNetFromCaffe(prott1, prott2)

         CLASSES = ["person"]
@@ -129,13 +135,17 @@ while True:
                 box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                 (startX, startY, endX, endY) = box.astype("int")
                 # draw the prediction on the frame
-                label = "{}: {:.2f}%".format(CLASSES[idx],
-                    confidence * 100)
-                cv2.rectangle(frame, (startX, startY), (endX, endY),
-                    COLORS[idx], 2)
+
+                #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
+                label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
+
+                #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
+                cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)
+
                 y = startY - 15 if startY - 15 > 15 else startY + 15
-                cv2.putText(frame, label, (startX, y),
-                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+
+                #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
+                cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
                 cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
                 # Start tracker
@@ -183,8 +193,7 @@ while True:
     # draw the text and timestamp on the frame
     now2 = datetime.now()
     time_passed_seconds = str((now2-now).seconds)
-    cv2.putText(frame, 'Detecting persons',(10, 20),
-        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+    cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

     # show the frame and record if the user presses a key
     cv2.imshow("Video stream", frame)