Smart-Home am Beispiel der Präsenzerkennung im Raum Projektarbeit Lennart Heimbs, Johannes Krug, Sebastian Dohle und Kevin Holzschuh bei Prof. Oliver Hofmann SS2019
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

video_presence.py 8.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217
  1. #!/usr/bin/env python
  2. from imutils.video import VideoStream
  3. from imutils.video import FPS
  4. import argparse
  5. import imutils
  6. import time
  7. import cv2
  8. from datetime import datetime, time
  9. import numpy as np
  10. import time as time2
  11. """ Arguments """
  12. ap = argparse.ArgumentParser()
  13. ap.add_argument("-v", "--video", help="path to the video file")
  14. ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
  15. ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
  16. args = vars(ap.parse_args())
  17. """ Determine opencv version and select tracker """
  18. # extract the OpenCV version info
  19. (major, minor) = cv2.__version__.split(".")[:2]
  20. # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
  21. # function to create the entity that tracks objects
  22. if int(major) == 3 and int(minor) < 3:
  23. tracker = cv2.Tracker_create(args["tracker"].upper())
  24. #tracker = cv2.TrackerGOTURN_create()
  25. # otherwise, for OpenCV 3.3 or newer,
  26. # we need to explicity call the respective constructor that contains the tracker object:
  27. else:
  28. # initialize a dictionary that maps strings to their corresponding
  29. # OpenCV object tracker implementations
  30. OPENCV_OBJECT_TRACKERS = {
  31. "csrt": cv2.TrackerCSRT_create,
  32. "kcf": cv2.TrackerKCF_create,
  33. "boosting": cv2.TrackerBoosting_create,
  34. "mil": cv2.TrackerMIL_create,
  35. "tld": cv2.TrackerTLD_create,
  36. "medianflow": cv2.TrackerMedianFlow_create,
  37. "mosse": cv2.TrackerMOSSE_create
  38. }
  39. # grab the appropriate object tracker using our dictionary of
  40. # OpenCV object tracker objects
  41. tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
  42. #tracker = cv2.TrackerGOTURN_create()
  43. # if the video argument is None, then the code will read from webcam (work in progress)
  44. if args.get("video", None) is None:
  45. vs = VideoStream(src=0).start()
  46. time2.sleep(2.0)
  47. # otherwise, we are reading from a video file
  48. else:
  49. vs = cv2.VideoCapture(args["video"])
  50. """" Analyzing video frames """
  51. # loop over the frames of the video, and store corresponding information from each frame
  52. firstFrame = None
  53. initBB2 = None
  54. fps = None
  55. differ = None
  56. now = ''
  57. framecounter = 0
  58. trackeron = 0
  59. people_count_total = 0
  60. while True:
  61. people_count_per_frame = 0
  62. frame = vs.read()
  63. frame = frame if args.get("video", None) is None else frame[1]
  64. # if the frame can not be grabbed, then we have reached the end of the video
  65. if frame is None:
  66. break
  67. # resize the frame to 500
  68. frame = imutils.resize(frame, width=500)
  69. framecounter = framecounter+1
  70. if framecounter > 1:
  71. (H, W) = frame.shape[:2]
  72. gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  73. gray = cv2.GaussianBlur(gray, (21, 21), 0)
  74. # if the first frame is None, initialize it
  75. if firstFrame is None:
  76. firstFrame = gray
  77. continue
  78. # compute the absolute difference between the current frame and first frame
  79. frameDelta = cv2.absdiff(firstFrame, gray)
  80. thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
  81. # dilate the thresholded image to fill in holes, then find contours on thresholded image
  82. thresh = cv2.dilate(thresh, None, iterations=2)
  83. thresh = np.uint8(thresh)
  84. cnts, im2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
  85. #cnts = cnts if imutils.is_cv2() else im2
  86. #print(len(cnts))
  87. #if len(cnts) > 1:
  88. #cnts = cnts[0] if imutils.is_cv2() else cnts[1]
  89. # loop over the contours identified
  90. contourcount = 0
  91. for c in cnts:
  92. contourcount = contourcount + 1
  93. # if the contour is too small, ignore it
  94. if cv2.contourArea(c) < args["min_area"]:
  95. continue
  96. # compute the bounding box for the contour, draw it on the frame,
  97. (x, y, w, h) = cv2.boundingRect(c)
  98. initBB2 =(x,y,w,h)
  99. prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
  100. prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
  101. net = cv2.dnn.readNetFromCaffe(prott1, prott2)
  102. CLASSES = ["person"]
  103. COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
  104. trackbox = frame[y:y+h, x:x+w]
  105. trackbox = cv2.resize(trackbox, (224, 224))
  106. cv2.imshow('image',trackbox)
  107. blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)),0.007843, (300, 300), 127.5)
  108. net.setInput(blob)
  109. detections = net.forward()
  110. for i in np.arange(0, detections.shape[2]):
  111. confidence = detections[0, 0, i, 2]
  112. confidence_level = 0.8
  113. if confidence > confidence_level:
  114. people_count_per_frame+=1
  115. people_count_total+=1
  116. # extract the index of the class label from the `detections`, then compute the (x, y)-coordinates of
  117. # the bounding box for the object
  118. idx = int(detections[0, 0, i, 1])
  119. box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
  120. (startX, startY, endX, endY) = box.astype("int")
  121. # draw the prediction on the frame
  122. #label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
  123. label = "{}: {:.2f}%".format(CLASSES[0], confidence * 100)
  124. #cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
  125. cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[0], 2)
  126. y = startY - 15 if startY - 15 > 15 else startY + 15
  127. #cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
  128. cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[0], 2)
  129. cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)
  130. # Start tracker
  131. now = datetime.now()
  132. if differ == None or differ > 9:
  133. tracker.init(frame, initBB2)
  134. fps = FPS().start()
  135. # check to see if we are currently tracking an object, if so, ignore other boxes
  136. # this code is relevant if we want to identify particular persons
  137. if initBB2 is not None:
  138. # grab the new bounding box coordinates of the object
  139. (success, box) = tracker.update(frame)
  140. # check to see if the tracking was a success
  141. differ = 10
  142. if success:
  143. (x, y, w, h) = [int(v) for v in box]
  144. cv2.rectangle(frame, (x, y), (x + w, y + h),(0, 255, 0), 2)
  145. differ = abs(initBB2[0]-box[0]) + abs(initBB2[1]-box[1])
  146. i = tracker.update(lastframe)
  147. if i[0] != True:
  148. time2.sleep(4000)
  149. else:
  150. trackeron = 1
  151. # update the FPS counter
  152. fps.update()
  153. fps.stop()
  154. # initialize the set of information we'll be displaying on
  155. # the frame
  156. info = [
  157. ("Success", "Yes" if success else "No"),
  158. ("FPS", "{:.2f}".format(fps.fps())),
  159. ("People Frame", "{}".format(people_count_per_frame)),
  160. ("People Total", "{}".format(people_count_total))
  161. ]
  162. # loop over the info tuples and draw them on our frame
  163. for (i, (k, v)) in enumerate(info):
  164. text = "{}: {}".format(k, v)
  165. cv2.putText(frame, text, (10, H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
  166. # draw the text and timestamp on the frame
  167. now2 = datetime.now()
  168. time_passed_seconds = str((now2-now).seconds)
  169. cv2.putText(frame, 'Detecting persons',(10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
  170. # show the frame and record if the user presses a key
  171. cv2.imshow("Video stream", frame)
  172. key = cv2.waitKey(1) & 0xFF
  173. # if the `q` key is pressed, break from the lop
  174. if key == ord("q"):
  175. break
  176. if key == ord("d"):
  177. firstFrame = None
  178. lastframe = frame
  179. # finally, stop the camera/stream and close any open windows
  180. vs.stop() if args.get("video", None) is None else vs.release()
  181. cv2.destroyAllWindows()