Smart-Home am Beispiel der Präsenzerkennung im Raum Projektarbeit Lennart Heimbs, Johannes Krug, Sebastian Dohle und Kevin Holzschuh bei Prof. Oliver Hofmann SS2019
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

video_presence.py 9.1KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248
  1. #!/usr/bin/env python
  2. from imutils.video import VideoStream
  3. from imutils.video import FPS
  4. import argparse
  5. import imutils
  6. import time
  7. import cv2
  8. from datetime import datetime, time
  9. import numpy as np
  10. import time as time2
  11. VISUAL_DEBUG=True
  12. """ Arguments """
  13. ap = argparse.ArgumentParser()
  14. ap.add_argument("-v", "--video", help="path to the video file")
  15. ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
  16. ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
  17. args = vars(ap.parse_args())
  18. """ Determine opencv version and select tracker """
  19. # extract the OpenCV version info
  20. (major, minor) = cv2.__version__.split(".")[:2]
  21. # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
  22. # function to create the entity that tracks objects
  23. if int(major) == 3 and int(minor) < 3:
  24. tracker = cv2.Tracker_create(args["tracker"].upper())
  25. #tracker = cv2.TrackerGOTURN_create()
  26. # otherwise, for OpenCV 3.3 or newer,
  27. # we need to explicity call the respective constructor that contains the tracker object:
  28. else:
  29. # initialize a dictionary that maps strings to their corresponding
  30. # OpenCV object tracker implementations
  31. OPENCV_OBJECT_TRACKERS = {
  32. "csrt": cv2.TrackerCSRT_create,
  33. "kcf": cv2.TrackerKCF_create,
  34. "boosting": cv2.TrackerBoosting_create,
  35. "mil": cv2.TrackerMIL_create,
  36. "tld": cv2.TrackerTLD_create,
  37. "medianflow": cv2.TrackerMedianFlow_create,
  38. "mosse": cv2.TrackerMOSSE_create
  39. }
  40. # grab the appropriate object tracker using our dictionary of
  41. # OpenCV object tracker objects
  42. tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
  43. #tracker = cv2.TrackerGOTURN_create()
  44. # if the video argument is None, then the code will read from webcam (work in progress)
  45. if args.get("video", None) is None:
  46. vs = VideoStream(src=0).start()
  47. time2.sleep(2.0)
  48. # otherwise, we are reading from a video file
  49. else:
  50. vs = cv2.VideoCapture(args["video"])
  51. """" Analyzing video frames """
  52. # loop over the frames of the video, and store corresponding information from each frame
  53. firstFrame = None
  54. initBB2 = None
  55. fps = None
  56. differ = None
  57. now = ''
  58. framecounter = 0
  59. trackeron = 0
  60. people_count_total = 0
  61. cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
  62. if VISUAL_DEBUG:
  63. cv2.namedWindow('debug image', cv2.WINDOW_NORMAL)
  64. while True:
  65. if VISUAL_DEBUG:
  66. print("Frame {}".format(framecounter))
  67. people_count_per_frame = 0
  68. frame = vs.read()
  69. frame = frame if args.get("video", None) is None else frame[1]
  70. # if the frame can not be grabbed, then we have reached the end of the video
  71. if frame is None:
  72. break
  73. # resize the frame to 500
  74. frame = imutils.resize(frame, width=500)
  75. framecounter = framecounter+1
  76. if framecounter > 1:
  77. (H, W) = frame.shape[:2]
  78. gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  79. gray = cv2.GaussianBlur(gray, (21, 21), 0)
  80. # if the first frame is None, initialize it
  81. if firstFrame is None:
  82. firstFrame = gray
  83. continue
  84. # compute the absolute difference between the current frame and first frame
  85. frameDelta = cv2.absdiff(firstFrame, gray)
  86. thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
  87. #debug
  88. if VISUAL_DEBUG:
  89. cv2.imshow("debug image", thresh)
  90. cv2.waitKey(0)
  91. #cv2.destroyWindow("threshhold image")
  92. # dilate the thresholded image to fill in holes
  93. thresh = cv2.dilate(thresh, None, iterations=2)
  94. # find contours on thresholded image
  95. thresh = np.uint8(thresh)
  96. cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
  97. if VISUAL_DEBUG:
  98. """img = cv2.drawContours(thresh.copy(), cnts, -1, (0,255,0), 3)
  99. cv2.imshow("debug image", img)
  100. cv2.waitKey(0)"""
  101. print(len(cnts))
  102. # loop over the contours identified
  103. contourcount = 0
  104. for c in cnts:
  105. contourcount = contourcount + 1
  106. # if the contour is too small, ignore it
  107. if cv2.contourArea(c) < args["min_area"]:
  108. continue
  109. # compute the bounding box for the contour, draw it on the frame,
  110. (x, y, w, h) = cv2.boundingRect(c)
  111. initBB2 =(x,y,w,h)
  112. prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
  113. prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
  114. net = cv2.dnn.readNetFromCaffe(prott1, prott2)
  115. CLASSES = ["person"]
  116. COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
  117. trackbox = frame[y:y+h, x:x+w]
  118. trackbox = cv2.resize(trackbox, (224, 224))
  119. #cv2.imshow('image',trackbox)
  120. """if VISUAL_DEBUG:
  121. trackbox2 = thresh[y:y+h, x:x+w]
  122. trackbox2 = cv2.resize(trackbox2, (224, 224))
  123. cv2.imshow('debug image',trackbox2)
  124. cv2.waitKey(0)"""
  125. blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
  126. net.setInput(blob)
  127. detections = net.forward()
  128. for i in np.arange(0, detections.shape[2]):
  129. confidence = detections[0, 0, i, 2]
  130. confidence_level = 0.95
  131. if confidence > confidence_level:
  132. people_count_per_frame+=1
  133. people_count_total+=1
  134. # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
  135. # the bounding box for the object
  136. idx = int(detections[0, 0, i, 1])
  137. box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
  138. (startX, startY, endX, endY) = box.astype("int")
  139. # draw the prediction on the frame
  140. #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
  141. label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
  142. #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
  143. cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)
  144. y = startY - 15 if startY - 15 > 15 else startY + 15
  145. #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
  146. cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
  147. if VISUAL_DEBUG:
  148. print("person found")
  149. cv2.imshow("debug image", frame)
  150. key = cv2.waitKey(0)
  151. cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
  152. # Start tracker
  153. now = datetime.now()
  154. if differ == None or differ > 9:
  155. tracker.init(frame, initBB2)
  156. fps = FPS().start()
  157. # check to see if we are currently tracking an object, if so, ignore other boxes
  158. # this code is relevant if we want to identify particular persons
  159. """if initBB2 is not None:
  160. # grab the new bounding box coordinates of the object
  161. (success, box) = tracker.update(frame)
  162. # check to see if the tracking was a success
  163. differ = 10
  164. if success:
  165. (x, y, w, h) = [int(v) for v in box]
  166. cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2)
  167. differ = abs(initBB2[0]-box[0]) + abs(initBB2[1]-box[1])
  168. i = tracker.update(lastframe)
  169. if i[0] != True:
  170. time2.sleep(4000)
  171. else:
  172. trackeron = 1
  173. # update the FPS counter
  174. fps.update()
  175. fps.stop()
  176. # initialize the set of information we'll be displaying on
  177. # the frame
  178. info = [
  179. ("Success", "Yes" if success else "No"),
  180. ("FPS", "{:.2f}".format(fps.fps())),
  181. ("People Frame", "{}".format(people_count_per_frame)),
  182. ("People Total", "{}".format(people_count_total))
  183. ]
  184. # loop over the info tuples and draw them on our frame
  185. for (i, (k, v)) in enumerate(info):
  186. text = "{}: {}".format(k, v)
  187. cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
  188. # draw the text and timestamp on the frame
  189. now2 = datetime.now()
  190. time_passed_seconds = str((now2-now).seconds)
  191. cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)"""
  192. # show the frame and record if the user presses a key
  193. cv2.imshow("Video stream", frame)
  194. key = cv2.waitKey(1) & 0xFF
  195. # if the `q` key is pressed, break from the lop
  196. if key == ord("q"):
  197. break
  198. if key == ord("d"):
  199. firstFrame = None
  200. lastframe = frame
  201. # finally, stop the camera/stream and close any open windows
  202. vs.stop() if args.get("video", None) is None else vs.release()
  203. cv2.destroyAllWindows()