Compare commits

870c27d0cb7c5e8876746c0486df694432fa9bd9..1d84ca43aedb24c55680c8f060e7a80deda450cc

No commits in common. "870c27d0cb7c5e8876746c0486df694432fa9bd9" and "1d84ca43aedb24c55680c8f060e7a80deda450cc" have entirely different histories.

3 changed files with 5 additions and 10 deletions

.gitignore

@@ -4,6 +4,3 @@ venv/
 camera/venv
 camera/images
 camera/videos
-*.jpg
-*.h264
-*.mp4


@@ -49,7 +49,6 @@ for imagePath in paths.list_images(args["images"]):
     print("[INFO] {}: {} original boxes, {} after suppression".format(filename, len(rects), len(pick)))
     # show the output images
-    if len(pick):
-        #cv2.imshow("Before NMS", orig)
-        cv2.imshow("After NMS", image)
-        cv2.waitKey(0)
+    #cv2.imshow("Before NMS", orig)
+    #cv2.imshow("After NMS", image)
+    #cv2.waitKey(0)
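
Note: the hunk header suggests this script iterates over paths.list_images(args["images"]), runs a detector, and keeps the boxes returned by non-maximum suppression in pick. A minimal sketch of that pattern, assuming the imutils package and a placeholder "images" directory (neither is taken from this diff):

import cv2
import numpy as np
from imutils import paths
from imutils.object_detection import non_max_suppression

# HOG person detector shipped with OpenCV
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

for imagePath in paths.list_images("images"):  # placeholder directory
    image = cv2.imread(imagePath)
    (rects, weights) = hog.detectMultiScale(image, winStride=(4, 4),
                                            padding=(8, 8), scale=1.05)
    # convert (x, y, w, h) boxes to (x1, y1, x2, y2) corners before suppression
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    print("[INFO] {}: {} original boxes, {} after suppression".format(
        imagePath, len(rects), len(pick)))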


@@ -50,7 +50,6 @@ if args.get("video", None) is None:
 # otherwise, we are reading from a video file
 else:
     vs = cv2.VideoCapture(args["video"])
-    #vs.set(cv2.CAP_PROP_FPS, 2)

 """" Analyzing video frames """
 # loop over the frames of the video, and store corresponding information from each frame
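Note: this hunk only drops the commented-out vs.set(cv2.CAP_PROP_FPS, 2) line; the surrounding context is the usual cv2.VideoCapture read loop. A hedged sketch of that loop, with a placeholder video path rather than the script's args["video"]:

import cv2

vs = cv2.VideoCapture("example.mp4")  # placeholder path, not taken from the diff
while True:
    (grabbed, frame) = vs.read()
    # stop when the video file is exhausted or the frame could not be read
    if not grabbed:
        break
    # ... per-frame processing (thresholding, contours, detection) would go here ...
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
vs.release()
cv2.destroyAllWindows()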
@@ -93,7 +92,7 @@ while True:
     # dilate the thresholded image to fill in holes, then find contours on thresholded image
     thresh = cv2.dilate(thresh, None, iterations=2)
     thresh = np.uint8(thresh)
-    _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
+    cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
     #cnts = cnts if imutils.is_cv2() else im2
     #print(len(cnts))
     #if len(cnts) > 1:
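
Note: the change above tracks the cv2.findContours return signature: OpenCV 3.x returns (image, contours, hierarchy), while 2.x and 4.x return (contours, hierarchy). A version-agnostic way to unpack it, shown as a sketch with a dummy binary image rather than this repo's frames:

import cv2
import numpy as np

# dummy binary image standing in for the script's thresholded frame
thresh = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(thresh, (20, 20), (60, 60), 255, -1)

# OpenCV 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy)
result = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = result[0] if len(result) == 2 else result[1]
print(len(cnts))  # one contour for the filled rectangle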
@@ -121,7 +120,7 @@ while True:
     trackbox = frame[y:y+h, x:x+w]
     trackbox = cv2.resize(trackbox, (224, 224))
-    #cv2.imshow('image',trackbox)
+    cv2.imshow('image',trackbox)
     blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
     net.setInput(blob)
     detections = net.forward()
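
Note: the 0.007843 scale factor and 127.5 mean in blobFromImage are the values commonly used with a MobileNet-SSD Caffe model; the model file names below are placeholders, not paths from this repo. A hedged sketch of reading the detections tensor that net.forward() returns for an SSD-style network:

import cv2
import numpy as np

# placeholder model files, assuming an SSD-style Caffe network
net = cv2.dnn.readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                               "MobileNetSSD_deploy.caffemodel")

frame = np.zeros((300, 300, 3), dtype=np.uint8)  # stand-in for trackbox
(h, w) = frame.shape[:2]

blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
net.setInput(blob)
detections = net.forward()

# detections has shape (1, 1, N, 7); each row is
# [image id, class label, confidence, x1, y1, x2, y2] with coordinates in [0, 1]
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if confidence > 0.5:
        idx = int(detections[0, 0, i, 1])
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)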