Smart-Home am Beispiel der Präsenzerkennung im Raum Projektarbeit Lennart Heimbs, Johannes Krug, Sebastian Dohle und Kevin Holzschuh bei Prof. Oliver Hofmann SS2019
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

counter_people.py 5.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177
  1. #!/usr/bin/env python
  2. import numpy as np
  3. import imutils
  4. import cv2
  5. import argparse
  6. from video_stream import imagezmq
  7. from imutils.object_detection import non_max_suppression
  8. '''
  9. Usage:
  10. python peopleCounter.py -i PATH_TO_IMAGE # Reads and detect people in a single local stored image
  11. python peopleCounter.py -c # Attempts to detect people using webcam
  12. '''
  13. HOGCV = cv2.HOGDescriptor()
  14. HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
  15. VERBOSITY = False
  16. def detector(image):
  17. '''
  18. @image is a numpy array
  19. '''
  20. clone = image.copy()
  21. (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
  22. # draw the original bounding boxes
  23. for (x, y, w, h) in rects:
  24. cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 0, 255), 2)
  25. # Applies non-max supression from imutils package to kick-off overlapped
  26. # boxes
  27. rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
  28. result = non_max_suppression(rects, probs=None, overlapThresh=0.65)
  29. return result
  30. def args_parser():
  31. ''' images, videos, remote or a local camera feed allowed
  32. verbose for added debugging'''
  33. ap = argparse.ArgumentParser()
  34. ap.add_argument("-i", "--image", default=None,
  35. help="path to image test file directory")
  36. ap.add_argument("-c", "--camera", action="store_true", default=False,
  37. help="Set as true if you wish to use the camera")
  38. ap.add_argument("-v", "--video", default=None,
  39. help="path to the video file")
  40. ap.add_argument("-r", "--remote", action="store_true", default=False,
  41. help="video comes from remote source via imagezmq")
  42. ap.add_argument("--verbose", action="store_true", default=False,
  43. help="increase output verbosity")
  44. args = vars(ap.parse_args())
  45. if args["verbose"]:
  46. VERBOSITY = True
  47. return args
  48. def usage():
  49. print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v] [-r REMOTE] [--verbose]")
  50. print()
  51. print("optional arguments:")
  52. print(" -h, --help show this help message and exit")
  53. print(" -i IMAGE, --image IMAGE")
  54. print(" path to image test file directory")
  55. print(" -c, --camera Set as true if you wish to use the camera")
  56. print(" -v, --video path to the video file")
  57. print(" -r REMOTE, --remote REMOTE")
  58. print(" video comes from remote source via imagezmq")
  59. print(" --verbose increase output verbosity")
  60. def localDetect(image_path):
  61. result = []
  62. image = cv2.imread(image_path)
  63. image = imutils.resize(image, width=min(400, image.shape[1]))
  64. if len(image) <= 0:
  65. print("[ERROR] could not read local image")
  66. return result
  67. print("[INFO] Detecting people")
  68. result = detector(image)
  69. if VERBOSITY:
  70. # shows the result
  71. for (xA, yA, xB, yB) in result:
  72. cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
  73. cv2.imshow("result", image)
  74. cv2.waitKey(0)
  75. cv2.destroyWindow("result")
  76. #cv2.imwrite("result.png", np.hstack((clone, image)))
  77. return result#(result, image)
  78. def videoDetect(cap):
  79. while True:
  80. # Capture frame-by-frame
  81. _, frame = cap.read()
  82. if frame is None:
  83. break
  84. frame = imutils.resize(frame, width=min(400, frame.shape[1]))
  85. result = detector(frame.copy())
  86. # shows the result
  87. for (xA, yA, xB, yB) in result:
  88. cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
  89. if VERBOSITY:
  90. cv2.imshow('frame', frame)
  91. cv2.waitKey(0)
  92. #if time.time() - init >= sample_time:
  93. if result:
  94. print("{} people detected.".format(len(result)))
  95. #init = time.time()
  96. if cv2.waitKey(1) & 0xFF == ord('q'):
  97. break
  98. # When everything done, release the capture
  99. cap.release()
  100. cv2.destroyAllWindows()
  101. def remoteDetect(image_hub):
  102. while True:
  103. rpi_name, image = image_hub.recv_image()
  104. cv2.imshow(rpi_name, image) # 1 window for each RPi
  105. cv2.waitKey(1)
  106. image_hub.send_reply(b'OK')
  107. def detectPeople(args):
  108. image_path = args["image"]
  109. video_path = args["video"]
  110. camera = True if args["camera"] else False
  111. remote = True if args["remote"] else False
  112. # Routine to read local image
  113. if image_path is not None:
  114. print("[INFO] Image path provided, attempting to read image")
  115. (result, image) = localDetect(image_path)
  116. print(str(len(result)) + " People detected.")
  117. elif video_path is not None:
  118. print("[INFO] Video path provided, reading video")
  119. cap = cv2.VideoCapture(video_path)
  120. videoDetect(cap)
  121. # Routine to read images from webcam
  122. elif camera:
  123. print("[INFO] Reading images from local camera")
  124. cap = cv2.VideoCapture(0)
  125. videoDetect(cap)
  126. elif remote:
  127. print("[INFO] Reading images from remote stream")
  128. image_hub = imagezmq.ImageHub()
  129. remoteDetect(image_hub)
  130. else:
  131. usage()
  132. def main():
  133. args = args_parser()
  134. detectPeople(args)
  135. if __name__ == '__main__':
  136. main()