Smart-Home am Beispiel der Präsenzerkennung im Raum Projektarbeit Lennart Heimbs, Johannes Krug, Sebastian Dohle und Kevin Holzschuh bei Prof. Oliver Hofmann SS2019
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

video_presence.py 8.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218
  1. #!/usr/bin/env python
  2. from imutils.video import VideoStream
  3. from imutils.video import FPS
  4. import argparse
  5. import imutils
  6. import time
  7. import cv2
  8. from datetime import datetime, time
  9. import numpy as np
  10. import time as time2
  11. """ Arguments """
  12. ap = argparse.ArgumentParser()
  13. ap.add_argument("-v", "--video", help="path to the video file")
  14. ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
  15. ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
  16. args = vars(ap.parse_args())
  17. """ Determine opencv version and select tracker """
  18. # extract the OpenCV version info
  19. (major, minor) = cv2.__version__.split(".")[:2]
  20. # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
  21. # function to create the entity that tracks objects
  22. if int(major) == 3 and int(minor) < 3:
  23. tracker = cv2.Tracker_create(args["tracker"].upper())
  24. #tracker = cv2.TrackerGOTURN_create()
  25. # otherwise, for OpenCV 3.3 or newer,
  26. # we need to explicity call the respective constructor that contains the tracker object:
  27. else:
  28. # initialize a dictionary that maps strings to their corresponding
  29. # OpenCV object tracker implementations
  30. OPENCV_OBJECT_TRACKERS = {
  31. "csrt": cv2.TrackerCSRT_create,
  32. "kcf": cv2.TrackerKCF_create,
  33. "boosting": cv2.TrackerBoosting_create,
  34. "mil": cv2.TrackerMIL_create,
  35. "tld": cv2.TrackerTLD_create,
  36. "medianflow": cv2.TrackerMedianFlow_create,
  37. "mosse": cv2.TrackerMOSSE_create
  38. }
  39. # grab the appropriate object tracker using our dictionary of
  40. # OpenCV object tracker objects
  41. tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
  42. #tracker = cv2.TrackerGOTURN_create()
  43. # if the video argument is None, then the code will read from webcam (work in progress)
  44. if args.get("video", None) is None:
  45. vs = VideoStream(src=0).start()
  46. time2.sleep(2.0)
  47. # otherwise, we are reading from a video file
  48. else:
  49. vs = cv2.VideoCapture(args["video"])
  50. #vs.set(cv2.CAP_PROP_FPS, 2)
  51. """" Analyzing video frames """
  52. # loop over the frames of the video, and store corresponding information from each frame
  53. firstFrame = None
  54. initBB2 = None
  55. fps = None
  56. differ = None
  57. now = ''
  58. framecounter = 0
  59. trackeron = 0
  60. people_count_total = 0
  61. while True:
  62. people_count_per_frame = 0
  63. frame = vs.read()
  64. frame = frame if args.get("video", None) is None else frame[1]
  65. # if the frame can not be grabbed, then we have reached the end of the video
  66. if frame is None:
  67. break
  68. # resize the frame to 500
  69. frame = imutils.resize(frame, width=500)
  70. framecounter = framecounter+1
  71. if framecounter > 1:
  72. (H, W) = frame.shape[:2]
  73. gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  74. gray = cv2.GaussianBlur(gray, (21, 21), 0)
  75. # if the first frame is None, initialize it
  76. if firstFrame is None:
  77. firstFrame = gray
  78. continue
  79. # compute the absolute difference between the current frame and first frame
  80. frameDelta = cv2.absdiff(firstFrame, gray)
  81. thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
  82. # dilate the thresholded image to fill in holes, then find contours on thresholded image
  83. thresh = cv2.dilate(thresh, None, iterations=2)
  84. thresh = np.uint8(thresh)
  85. _, cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
  86. #cnts = cnts if imutils.is_cv2() else im2
  87. #print(len(cnts))
  88. #if len(cnts) > 1:
  89. #cnts = cnts[0] if imutils.is_cv2() else cnts[1]
  90. # loop over the contours identified
  91. contourcount = 0
  92. for c in cnts:
  93. contourcount = contourcount + 1
  94. # if the contour is too small, ignore it
  95. if cv2.contourArea(c) < args["min_area"]:
  96. continue
  97. # compute the bounding box for the contour, draw it on the frame,
  98. (x, y, w, h) = cv2.boundingRect(c)
  99. initBB2 =(x,y,w,h)
  100. prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
  101. prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
  102. net = cv2.dnn.readNetFromCaffe(prott1, prott2)
  103. CLASSES = ["person"]
  104. COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
  105. trackbox = frame[y:y+h, x:x+w]
  106. trackbox = cv2.resize(trackbox, (224, 224))
  107. #cv2.imshow('image',trackbox)
  108. blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
  109. net.setInput(blob)
  110. detections = net.forward()
  111. for i in np.arange(0, detections.shape[2]):
  112. confidence = detections[0, 0, i, 2]
  113. confidence_level = 0.8
  114. if confidence > confidence_level:
  115. people_count_per_frame+=1
  116. people_count_total+=1
  117. # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
  118. # the bounding box for the object
  119. idx = int(detections[0, 0, i, 1])
  120. box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
  121. (startX, startY, endX, endY) = box.astype("int")
  122. # draw the prediction on the frame
  123. #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
  124. label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
  125. #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
  126. cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)
  127. y = startY - 15 if startY - 15 > 15 else startY + 15
  128. #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
  129. cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
  130. cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
  131. # Start tracker
  132. now = datetime.now()
  133. if differ == None or differ > 9:
  134. tracker.init(frame, initBB2)
  135. fps = FPS().start()
  136. # check to see if we are currently tracking an object, if so, ignore other boxes
  137. # this code is relevant if we want to identify particular persons
  138. if initBB2 is not None:
  139. # grab the new bounding box coordinates of the object
  140. (success, box) = tracker.update(frame)
  141. # check to see if the tracking was a success
  142. differ = 10
  143. if success:
  144. (x, y, w, h) = [int(v) for v in box]
  145. cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2)
  146. differ = abs(initBB2[0]-box[0]) + abs(initBB2[1]-box[1])
  147. i = tracker.update(lastframe)
  148. if i[0] != True:
  149. time2.sleep(4000)
  150. else:
  151. trackeron = 1
  152. # update the FPS counter
  153. fps.update()
  154. fps.stop()
  155. # initialize the set of information we'll be displaying on
  156. # the frame
  157. info = [
  158. ("Success", "Yes" if success else "No"),
  159. ("FPS", "{:.2f}".format(fps.fps())),
  160. ("People Frame", "{}".format(people_count_per_frame)),
  161. ("People Total", "{}".format(people_count_total))
  162. ]
  163. # loop over the info tuples and draw them on our frame
  164. for (i, (k, v)) in enumerate(info):
  165. text = "{}: {}".format(k, v)
  166. cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
  167. # draw the text and timestamp on the frame
  168. now2 = datetime.now()
  169. time_passed_seconds = str((now2-now).seconds)
  170. cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
  171. # show the frame and record if the user presses a key
  172. cv2.imshow("Video stream", frame)
  173. key = cv2.waitKey(1) & 0xFF
  174. # if the `q` key is pressed, break from the lop
  175. if key == ord("q"):
  176. break
  177. if key == ord("d"):
  178. firstFrame = None
  179. lastframe = frame
  180. # finally, stop the camera/stream and close any open windows
  181. vs.stop() if args.get("video", None) is None else vs.release()
  182. cv2.destroyAllWindows()