Smart home by the example of in-room presence detection. Project work by Lennart Heimbs, Johannes Krug, Sebastian Dohle and Kevin Holzschuh with Prof. Oliver Hofmann, SS2019.

video_presence.py 7.6KB

from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
import numpy as np
# only datetime is imported here: "from datetime import time" would shadow the
# time module imported above (the original worked around this with "import time as time2")
from datetime import datetime
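# Pipeline overview:
#   1. frame differencing against a reference frame flags motion regions,
#   2. a MobileNet SSD (Caffe) checks whether a moving region contains a person,
#   3. an OpenCV object tracker (CSRT by default) follows the detection across frames.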
  10. """ Arguments """
  11. ap = argparse.ArgumentParser()
  12. ap.add_argument("-v", "--video", help="path to the video file")
  13. ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
  14. ap.add_argument("-t", "--tracker", type=str, default="csrt", help="OpenCV object tracker type")
  15. args = vars(ap.parse_args())
  16. """ Determine opencv version and select tracker """
  17. # extract the OpenCV version info
  18. (major, minor) = cv2.__version__.split(".")[:2]
  19. # if we are using OpenCV 3.2 or an earlier version, we can use a special factory
  20. # function to create the entity that tracks objects
  21. if int(major) == 3 and int(minor) < 3:
  22. tracker = cv2.Tracker_create(args["tracker"].upper())
  23. #tracker = cv2.TrackerGOTURN_create()
  24. # otherwise, for OpenCV 3.3 or newer,
  25. # we need to explicity call the respective constructor that contains the tracker object:
  26. else:
  27. # initialize a dictionary that maps strings to their corresponding
  28. # OpenCV object tracker implementations
  29. OPENCV_OBJECT_TRACKERS = {
  30. "csrt": cv2.TrackerCSRT_create,
  31. "kcf": cv2.TrackerKCF_create,
  32. "boosting": cv2.TrackerBoosting_create,
  33. "mil": cv2.TrackerMIL_create,
  34. "tld": cv2.TrackerTLD_create,
  35. "medianflow": cv2.TrackerMedianFlow_create,
  36. "mosse": cv2.TrackerMOSSE_create
  37. }
  38. # grab the appropriate object tracker using our dictionary of
  39. # OpenCV object tracker objects
  40. tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
  41. #tracker = cv2.TrackerGOTURN_create()
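# Rough guidance on the choices above: CSRT tends to be the most accurate but
# slowest of these trackers, MOSSE the fastest but least robust, and KCF a
# reasonable middle ground.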
# if the video argument is None, then the code will read from webcam (work in progress)
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # let the camera sensor warm up
# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])
  49. """" Analyzing video frames """
  50. # loop over the frames of the video, and store corresponding information from each frame
  51. firstFrame = None
  52. initBB2 = None
  53. fps = None
  54. differ = None
  55. now = ''
  56. framecounter = 0
  57. trackeron = 0
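# `differ` measures how far the tracked box has drifted from the box the tracker
# was initialized with (sum of the x- and y-offsets of the top-left corner); the
# tracker is re-initialized once this drift exceeds 9 pixels.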
while True:
    frame = vs.read()
    frame = frame if args.get("video", None) is None else frame[1]
    # if the frame can not be grabbed, then we have reached the end of the video
    if frame is None:
        break

    # resize the frame to a width of 500 pixels
    frame = imutils.resize(frame, width=500)
    framecounter = framecounter + 1
    if framecounter > 1:
        (H, W) = frame.shape[:2]
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the first frame is None, initialize it
        if firstFrame is None:
            firstFrame = gray
            continue

        # compute the absolute difference between the current frame and first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # grab_contours handles the differing return signatures of findContours
        # across OpenCV 2.x, 3.x and 4.x
        cnts = imutils.grab_contours(cnts)
        # loop over the contours identified
        contourcount = 0
        for c in cnts:
            contourcount = contourcount + 1

            # if the contour is too small, ignore it
            if cv2.contourArea(c) < args["min_area"]:
                continue

            # compute the bounding box for the contour, draw it on the frame
            (x, y, w, h) = cv2.boundingRect(c)
            initBB2 = (x, y, w, h)

            # run the person detector on the motion region
            trackbox = frame[y:y+h, x:x+w]
            trackbox = cv2.resize(trackbox, (224, 224))
            cv2.imshow('image', trackbox)
            blob = cv2.dnn.blobFromImage(cv2.resize(trackbox, (300, 300)), 0.007843, (300, 300), 127.5)
            net.setInput(blob)
            detections = net.forward()

            confidence_level = 0.7
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > confidence_level:
                    # extract the index of the class label from `detections`; only
                    # the "person" class is of interest for presence detection
                    idx = int(detections[0, 0, i, 1])
                    if CLASSES[idx] != "person":
                        continue
                    # the network saw only the cropped region, so scale its normalized
                    # coordinates by the crop size and shift them by the crop origin
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h]) + np.array([x, y, x, y])
                    (startX, startY, endX, endY) = box.astype("int")
                    # draw the prediction on the frame
                    label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                    # use a separate variable for the label position so the contour's
                    # y coordinate is not overwritten
                    label_y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(frame, label, (startX, label_y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)

            # Start tracker
            now = datetime.now()
            if differ is None or differ > 9:
                tracker.init(frame, initBB2)
                fps = FPS().start()
        # check to see if we are currently tracking an object; if so, ignore other boxes
        # this code is relevant if we want to identify particular persons
        if initBB2 is not None:
            # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)
            # assume enough drift to force re-initialization, unless tracking
            # succeeds and the actual drift is computed below
            differ = 10
            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                differ = abs(initBB2[0] - box[0]) + abs(initBB2[1] - box[1])
                # re-run the tracker on the previous frame as a sanity check
                check = tracker.update(lastframe)
                if not check[0]:
                    # pause when the tracker loses the object (the original slept
                    # for 4000 s here, presumably intended as milliseconds)
                    time.sleep(4)
            else:
                trackeron = 1

            # update the FPS counter
            fps.update()
            fps.stop()

            # initialize the set of information we'll be displaying on the frame
            info = [
                ("Success", "Yes" if success else "No"),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]
            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # draw the status text on the frame; the elapsed seconds are computed
            # for the overlay but not currently drawn
            now2 = datetime.now()
            time_passed_seconds = str((now2 - now).seconds)
            cv2.putText(frame, 'Detecting persons', (10, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # show the frame and record if the user presses a key
    cv2.imshow("Video stream", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break
    # if the `d` key is pressed, reset the reference frame for motion detection
    if key == ord("d"):
        firstFrame = None
    lastframe = frame

# finally, stop the camera/stream and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
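For reference, the script would typically be launched as shown below. The video path is a placeholder; the flag names and defaults come from the argparse setup above, and the two model files must be available under ML-Models/ as hard-coded in the script. Press q to quit and d to reset the motion-detection reference frame.

    # analyze a recorded video file with the default CSRT tracker
    python video_presence.py --video path/to/recording.mp4

    # read from the first webcam, using the faster MOSSE tracker and a
    # larger minimum contour area
    python video_presence.py --tracker mosse --min-area 800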