Smart Home, Using Room Presence Detection as an Example. Project work by Lennart Heimbs, Johannes Krug, Sebastian Dohle, and Kevin Holzschuh under Prof. Oliver Hofmann, summer semester 2019.

person_detection.py (8.3 KB)

#!/usr/bin/env python
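"""Presence detection for a room.

Receives camera frames from a Raspberry Pi over imagezmq (or reads a
video file), finds regions of motion by differencing against a reference
frame, classifies each region with a MobileNet-SSD, and publishes the
resulting person count to an MQTT broker.
"""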
import argparse
import time
from statistics import median

import imutils
from imutils.video import VideoStream
import cv2
import numpy as np
import paho.mqtt.client as mqtt
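
# imagezmq ships with the project's video_stream package and receives
# frames sent over ZeroMQ from the camera node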
from video_stream import imagezmq

# VISUAL_DEBUG toggles the (currently disabled) threshold-image preview
VISUAL_DEBUG = True

# MQTT broker that receives the person count
BROKER = "141.75.33.126"
PORT = 1883


def getArgs():
    """Parse command-line arguments."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the video file")
    ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
    return vars(ap.parse_args())


def main():
    try:
        mqtt_client = mqtt.Client("pi-camera")
        mqtt_client.connect(BROKER, PORT)
    except Exception:
        print("Connection to MQTT-Broker failed.")
        return 1
    try:
        args = getArgs()
        timer = Timer()

        # if no video file is given, read frames from the imagezmq stream
        # (direct webcam input is work in progress)
        if args.get("video", None) is None:
            #vs = VideoStream(src=0).start()
            image_hub = imagezmq.ImageHub()
            time.sleep(2.0)
        # otherwise, read from the video file
        else:
            vs = cv2.VideoCapture(args["video"])

        cv2.namedWindow('Video stream', cv2.WINDOW_NORMAL)
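        # DetectionFromFrame keeps the per-frame detection state; 0.8 is the
        # minimum SSD confidence for counting a detection as a person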
        detector = DetectionFromFrame(args["min_area"], 0.8)

        while True:
            people_count = 0
            timer.start_frame_timer()

            if args.get("video", None) is None:
                rpi_name, detector.currentFrame = image_hub.recv_image()
                image_hub.send_reply(b'OK')
            else:
                detector.currentFrame = vs.read()
            # cv2.VideoCapture.read() returns a (grabbed, frame) tuple
            detector.currentFrame = detector.currentFrame if args.get("video", None) is None else detector.currentFrame[1]

            # if the frame cannot be grabbed, the end of the video has been reached
            if detector.currentFrame is None:
                break

            # resize the frame to a width of 500 pixels
            detector.currentFrame = imutils.resize(detector.currentFrame, width=500)
            detector.framecounter += 1

            if detector.framecounter > 1:
                cnts = detector.prepareFrame()
                for c in cnts:
                    bound_rect = cv2.boundingRect(c)

                    prott1 = r'ML-Models/MobileNetSSD_deploy.prototxt'
                    prott2 = r'ML-Models/MobileNetSSD_deploy.caffemodel'
                    net = cv2.dnn.readNetFromCaffe(prott1, prott2)
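                    # NOTE: re-reading the Caffe model for every contour is
                    # expensive; loading it once before the loop would likely
                    # speed up per-frame processing considerably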

                    # crop the motion region and build the SSD input blob;
                    # scale 0.007843 (= 1/127.5) and mean 127.5 map the pixel
                    # values into the [-1, 1] range expected by MobileNet
                    trackbox = detector.currentFrame[bound_rect[1]:bound_rect[1]+bound_rect[3],
                                                     bound_rect[0]:bound_rect[0]+bound_rect[2]]
                    trackbox = cv2.resize(trackbox, (224, 224))
                    blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)), 0.007843, (300, 300), 127.5)
                    net.setInput(blob)
                    detections = net.forward()
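
                    # detections has shape (1, 1, N, 7); entry i holds
                    # [batch_id, class_id, confidence, x1, y1, x2, y2]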
                    for i in np.arange(0, detections.shape[2]):
                        people_count += detector.detectConfidentiallyPeople(i, detections, bound_rect)

                    cv2.rectangle(detector.currentFrame, (bound_rect[0], bound_rect[1]),
                                  (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), (255, 255, 0), 1)

            # show the frame and record if the user presses a key
            cv2.imshow("Video stream", detector.currentFrame)
            key = cv2.waitKey(1) & 0xFF

            # send the number of detected people via MQTT
            mqtt_client.publish("/gso/bb/104/Camera", str(people_count))

            # if the `q` key is pressed, break from the loop
            if key == ord("q"):
                break
            # if the `d` key is pressed, reset the reference frame
            if key == ord("d"):
                detector.firstFrame = None

            timer.print_frame_time()

        # finally, stop the camera/stream and close any open windows
        if args.get("video", None) is not None:
            vs.release()
        cv2.destroyAllWindows()
    finally:
        if args.get("video", None) is None:
            image_hub.send_reply(b'OK')


class DetectionFromFrame:
    def __init__(self, min_size, confidence):
        self.min_size = min_size
        self.confidence_level = confidence

        self.firstFrame = None
        self.currentFrame = None
        self.initBB2 = None
        self.fps = None
        self.differ = None
        self.now = ''
        self.framecounter = 0
        self.people_count_total = 0

    def prepareFrame(self):
        gray = cv2.cvtColor(self.currentFrame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the first frame is None, initialize it as the reference frame
        if self.firstFrame is None:
            self.firstFrame = gray
            return []

        # compute the absolute difference between the current and first frame
        frameDelta = cv2.absdiff(self.firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # debug: preview the thresholded image
        # if VISUAL_DEBUG:
        #     cv2.imshow("debug image", thresh)
        #     cv2.waitKey(0)
        #     cv2.destroyWindow("debug image")

        # dilate the thresholded image to fill in holes
        thresh = cv2.dilate(thresh, None, iterations=2)

        # find contours in the thresholded image
        thresh = np.uint8(thresh)
        cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        return cnts

    def detectConfidentiallyPeople(self, i, detections, bound_rect):
        detected_color = (0, 255, 0)
        confidence = detections[0, 0, i, 2]

        if confidence > self.confidence_level:
            # draw a green rectangle with the confidence label over the detected area
            cv2.rectangle(self.currentFrame, (bound_rect[0], bound_rect[1]),
                          (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3]), detected_color, 3)
            label = "{:.2f}%".format(confidence * 100)
            cv2.putText(self.currentFrame, label, (bound_rect[0], bound_rect[1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, detected_color, 1)
            return 1
        else:
            return 0


class Timer:
    def __init__(self):
        self.frame_timer = None
        self.contour_timer = None
        self.detection_timer = None

        self.contour_time = []
        self.detection_time = []

    def start_frame_timer(self):
        self.frame_timer = time.time()

    def get_frame_time(self):
        return time.time() - self.frame_timer

    def start_contour_timer(self):
        self.contour_timer = time.time()

    def stop_contour_timer(self):
        self.contour_time.append(time.time() - self.contour_timer)

    def start_detection_timer(self):
        self.detection_timer = time.time()

    def stop_detection_timer(self):
        self.detection_time.append(time.time() - self.detection_timer)

    def print_frame_time(self):
        print("Time for Frame: {:.2f}.".format(self.get_frame_time()))

    def print_other_times(self):
        average_contour = 0 if not self.contour_time else sum(self.contour_time) / float(len(self.contour_time))
        average_detection = 0 if not self.detection_time else sum(self.detection_time) / float(len(self.detection_time))
        median_contour = 0 if not self.contour_time else median(self.contour_time)
        median_detection = 0 if not self.detection_time else median(self.detection_time)
        total_contour = sum(self.contour_time)
        total_detection = sum(self.detection_time)

        print("Contour Total: {:.2f}. Contour Median: {:.2f}. Contour Average: {:.2f}.".format(
            total_contour, median_contour, average_contour))
        print("Detection Total: {:.2f}. Detection Median: {:.2f}. Detection Average: {:.2f}.".format(
            total_detection, median_detection, average_detection))

        self.contour_time = []
        self.detection_time = []


if __name__ == "__main__":
    main()
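
To check the published counts, one can subscribe to the camera topic on the broker. A minimal subscriber sketch, assuming the broker address and topic hard-coded above and the paho-mqtt 1.x API used by the script; the client id "count-monitor" is an arbitrary choice:

import paho.mqtt.client as mqtt

# called for every message published on the subscribed topic
def on_message(client, userdata, msg):
    print("People detected: " + msg.payload.decode())

client = mqtt.Client("count-monitor")
client.connect("141.75.33.126", 1883)
client.subscribe("/gso/bb/104/Camera")
client.on_message = on_message
client.loop_forever()  # block and dispatch incoming messages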