Smart Home using the example of in-room presence detection. Project work by Lennart Heimbs, Johannes Krug, Sebastian Dohle and Kevin Holzschuh, supervised by Prof. Oliver Hofmann, SS2019

counter_people.py 5.8KB

#!/usr/bin/env python
import argparse
import numpy as np
import cv2
import imutils
from imutils.object_detection import non_max_suppression
from video_stream import imagezmq

'''
Usage:
python counter_people.py -i PATH_TO_IMAGE  # Detects people in a single locally stored image
python counter_people.py -c                # Detects people using the local webcam
'''

# HOG descriptor with OpenCV's default pre-trained people detector (linear SVM)
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

VERBOSITY = False
def detector(image):
    '''@image is a numpy array; returns the bounding boxes that
    survive non-maximum suppression.'''
    clone = image.copy()
    (rects, _) = HOGCV.detectMultiScale(image, winStride=(2, 2), padding=(8, 8), scale=1.05)

    # draw the original bounding boxes (on the clone, for optional debugging)
    for (x, y, w, h) in rects:
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # apply non-maximum suppression from the imutils package to discard
    # overlapping boxes
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    result = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    return result
def args_parser():
    '''Images, videos, a remote stream or a local camera feed are allowed;
    --verbose enables additional debug output.'''
    global VERBOSITY

    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", default=None,
                    help="path to the image file")
    ap.add_argument("-c", "--camera", action="store_true", default=False,
                    help="set if you wish to use the camera")
    ap.add_argument("-v", "--video", default=None,
                    help="path to the video file")
    ap.add_argument("-r", "--remote", action="store_true", default=False,
                    help="video comes from a remote source via imagezmq")
    ap.add_argument("--verbose", action="store_true", default=False,
                    help="increase output verbosity")
    args = vars(ap.parse_args())

    if args["verbose"]:
        VERBOSITY = True

    return args
def usage():
    print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v VIDEO] [-r] [--verbose]")
    print()
    print("optional arguments:")
    print("  -h, --help            show this help message and exit")
    print("  -i IMAGE, --image IMAGE")
    print("                        path to the image file")
    print("  -c, --camera          set if you wish to use the camera")
    print("  -v VIDEO, --video VIDEO")
    print("                        path to the video file")
    print("  -r, --remote          video comes from a remote source via imagezmq")
    print("  --verbose             increase output verbosity")
def localDetect(image_path):
    result = []
    image = cv2.imread(image_path)
    if image is None:
        print("[ERROR] could not read local image")
        return result
    image = imutils.resize(image, width=min(400, image.shape[1]))

    print("[INFO] Detecting people")
    result = detector(image)

    if VERBOSITY:
        # show the result
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
        cv2.imshow("result", image)
        cv2.waitKey(0)
        cv2.destroyWindow("result")

    return result
def videoDetect(cap):
    while True:
        # Capture frame-by-frame
        _, frame = cap.read()
        if frame is None:
            break

        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
        result = detector(frame.copy())

        # draw the detections
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)

        if VERBOSITY:
            cv2.imshow('frame', frame)
            cv2.waitKey(0)

        if len(result) > 0:
            print("{} people detected.".format(len(result)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def remoteDetect(image_hub):
    while True:
        # receive a frame from a remote sender and acknowledge it
        rpi_name, frame = image_hub.recv_image()
        image_hub.send_reply(b'OK')

        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
        result = detector(frame.copy())

        # draw the detections
        for (xA, yA, xB, yB) in result:
            cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
        cv2.imshow('frame', frame)

        if len(result) > 0:
            print("{} people detected.".format(len(result)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def detectPeople(args):
    image_path = args["image"]
    video_path = args["video"]
    camera = bool(args["camera"])
    remote = bool(args["remote"])

    # Routine to read a local image
    if image_path is not None:
        print("[INFO] Image path provided, attempting to read image")
        result = localDetect(image_path)
        print("{} people detected.".format(len(result)))
    # Routine to read a local video file
    elif video_path is not None:
        print("[INFO] Video path provided, reading video")
        cap = cv2.VideoCapture(video_path)
        videoDetect(cap)
    # Routine to read images from the webcam
    elif camera:
        print("[INFO] Reading images from local camera")
        cap = cv2.VideoCapture(0)
        videoDetect(cap)
    # Routine to receive images from a remote stream
    elif remote:
        print("[INFO] Reading images from remote stream")
        image_hub = imagezmq.ImageHub()
        remoteDetect(image_hub)
    else:
        usage()


def main():
    args = args_parser()
    detectPeople(args)


if __name__ == '__main__':
    main()
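
The remote mode waits for frames pushed to the ImageHub created in detectPeople(). For reference, below is a minimal sender sketch; it assumes the vendored video_stream.imagezmq module exposes the standard imagezmq ImageSender API, and the host name "counter-host" and port 5555 are placeholders for the machine running counter_people.py -r.

import socket
import time
import cv2
from video_stream import imagezmq

# REQ/REP sender: connects to the ImageHub opened by counter_people.py -r
# (host name and port are placeholders for this sketch)
sender = imagezmq.ImageSender(connect_to='tcp://counter-host:5555')
rpi_name = socket.gethostname()   # identifies this sender at the hub
cap = cv2.VideoCapture(0)         # camera on the sending device

while True:
    ok, frame = cap.read()
    if not ok:
        break
    sender.send_image(rpi_name, frame)  # blocks until the hub replies b'OK'
    time.sleep(0.05)                    # simple throttle (~20 fps)

cap.release()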