From fb7560d92e76ba2947ddc4e4c55e5cc4a6435c4d Mon Sep 17 00:00:00 2001
From: lheimbs
Date: Thu, 1 Aug 2019 10:24:44 +0200
Subject: [PATCH] Add Count people

---
 camera/video_presence.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/camera/video_presence.py b/camera/video_presence.py
index 35d39a4..64fa43d 100755
--- a/camera/video_presence.py
+++ b/camera/video_presence.py
@@ -60,8 +60,10 @@ differ = None
 now = ''
 framecounter = 0
 trackeron = 0
+people_count_total = 0
 
 while True:
+    people_count_per_frame = 0
     frame = vs.read()
     frame = frame if args.get("video", None) is None else frame[1]
     # if the frame can not be grabbed, then we have reached the end of the video
@@ -126,9 +128,11 @@ while True:
     for i in np.arange(0, detections.shape[2]):
         confidence = detections[0, 0, i, 2]
 
-        confidence_level = 0.7
+        confidence_level = 0.8
 
         if confidence > confidence_level:
+            people_count_per_frame+=1
+            people_count_total+=1
             # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
             # the bounding box for the object
             idx = int(detections[0, 0, i, 1])
@@ -156,7 +160,7 @@ while True:
 
 
     # check to see if we are currently tracking an object, if so, ignore other boxes
-    # this code is relevant if we want to identify particular persons (section 2 of this tutorial)
+    # this code is relevant if we want to identify particular persons
     if initBB2 is not None:
 
         # grab the new bounding box coordinates of the object
@@ -183,6 +187,8 @@ while True:
         info = [
             ("Success", "Yes" if success else "No"),
             ("FPS", "{:.2f}".format(fps.fps())),
+            ("People Frame", "{}".format(people_count_per_frame)),
+            ("People Total", "{}".format(people_count_total))
         ]
 
         # loop over the info tuples and draw them on our frame
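
Reviewer note (not part of the patch): the counting logic added above is small enough to exercise in isolation. The sketch below is a minimal, self-contained illustration; the array layout only mimics the (1, 1, N, 7) output of OpenCV's dnn MobileNet-SSD forward pass that video_presence.py iterates over, and the names count_people and frames are hypothetical, not code from the repository.

# Minimal sketch of the per-frame and running-total counters this patch adds.
import numpy as np

CONFIDENCE_LEVEL = 0.8  # the patch raises this threshold from 0.7 to 0.8

def count_people(detections, confidence_level=CONFIDENCE_LEVEL):
    """Return how many detections in one frame exceed the confidence threshold."""
    count = 0
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > confidence_level:
            count += 1
    return count

# Two fake frames of detections; the confidence sits in column 2 of each row.
frames = [
    np.array([[[[0, 15, 0.92, 0, 0, 0, 0],
                [0, 15, 0.55, 0, 0, 0, 0]]]]),  # one detection above 0.8
    np.array([[[[0, 15, 0.85, 0, 0, 0, 0],
                [0, 15, 0.81, 0, 0, 0, 0]]]]),  # two detections above 0.8
]

people_count_total = 0
for detections in frames:
    people_count_per_frame = count_people(detections)  # reset on every frame
    people_count_total += people_count_per_frame       # running sum, as in the patch
    print(people_count_per_frame, people_count_total)  # prints "1 1" then "2 3"

Because people_count_total accumulates the per-frame counts, a person who stays in view is counted once for every frame in which they are detected, so the "People Total" overlay is a running detection count rather than a count of distinct people.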