From cc9304365aac9b977e6c4a349ac711a57aa7ded9 Mon Sep 17 00:00:00 2001
From: Lennart Heimbs
Date: Sun, 4 Aug 2019 09:14:46 +0200
Subject: [PATCH] Add img/vid to gitignore; minor changes in video/image
 presence

---
 .gitignore               | 3 +++
 camera/image_presence.py | 7 ++++---
 camera/video_presence.py | 5 +++--
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5ee5bb4..dc8d05a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,6 @@ venv/
 camera/venv
 camera/images
 camera/videos
+*.jpg
+*.h264
+*.mp4
\ No newline at end of file
diff --git a/camera/image_presence.py b/camera/image_presence.py
index adb87f8..d45a516 100755
--- a/camera/image_presence.py
+++ b/camera/image_presence.py
@@ -49,6 +49,7 @@ for imagePath in paths.list_images(args["images"]):
     print("[INFO] {}: {} original boxes, {} after suppression".format(filename, len(rects), len(pick)))
 
     # show the output images
-    #cv2.imshow("Before NMS", orig)
-    #cv2.imshow("After NMS", image)
-    #cv2.waitKey(0)
+    if len(pick):
+        #cv2.imshow("Before NMS", orig)
+        cv2.imshow("After NMS", image)
+        cv2.waitKey(0)
diff --git a/camera/video_presence.py b/camera/video_presence.py
index 64fa43d..4b08cef 100755
--- a/camera/video_presence.py
+++ b/camera/video_presence.py
@@ -50,6 +50,7 @@ if args.get("video", None) is None:
 # otherwise, we are reading from a video file
 else:
     vs = cv2.VideoCapture(args["video"])
+    #vs.set(cv2.CAP_PROP_FPS, 2)
 
 """" Analyzing video frames """
 # loop over the frames of the video, and store corresponding information from each frame
@@ -92,7 +93,7 @@ while True:
     # dilate the thresholded image to fill in holes, then find contours on thresholded image
     thresh = cv2.dilate(thresh, None, iterations=2)
     thresh = np.uint8(thresh)
-    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+    _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
     #cnts = cnts if imutils.is_cv2() else im2
     #print(len(cnts))
     #if len(cnts) > 1:
@@ -120,7 +121,7 @@ while True:
 
         trackbox = frame[y:y+h, x:x+w]
         trackbox = cv2.resize(trackbox, (224, 224))
-        cv2.imshow('image',trackbox)
+        #cv2.imshow('image',trackbox)
         blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
        net.setInput(blob)
         detections = net.forward()
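
Note on the findContours change above: OpenCV 3.x returns a 3-tuple
(image, contours, hierarchy) from cv2.findContours, while OpenCV 2.4 and
4.x return only (contours, hierarchy), which is why the patch unpacks a
third value. Below is a minimal sketch of a version-agnostic helper,
assuming the script may run against either OpenCV line; the helper name
and the synthetic test frame are illustrative, not part of the repository.

import cv2
import numpy as np

def find_contours_compat(binary_image):
    """Return only the contour list, regardless of OpenCV version."""
    result = cv2.findContours(binary_image.copy(),
                              cv2.RETR_EXTERNAL,
                              cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3.x yields (image, contours, hierarchy); 2.4/4.x yield (contours, hierarchy).
    return result[1] if len(result) == 3 else result[0]

if __name__ == "__main__":
    # Hypothetical thresholded frame used only to exercise the helper.
    thresh = np.zeros((240, 320), dtype=np.uint8)
    cv2.rectangle(thresh, (50, 50), (150, 150), 255, -1)
    print("contours found:", len(find_contours_compat(thresh)))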