Smart-Home am Beispiel der Präsenzerkennung im Raum Projektarbeit Lennart Heimbs, Johannes Krug, Sebastian Dohle und Kevin Holzschuh bei Prof. Oliver Hofmann SS2019
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

counter_people.py 5.1KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175
  1. #!/usr/bin/env python
  2. import numpy as np
  3. import imutils
  4. import cv2
  5. import argparse
  6. from video_stream import imagezmq
  7. from imutils.object_detection import non_max_suppression
'''
Usage:
python peopleCounter.py -i PATH_TO_IMAGE # Reads and detect people in a single local stored image
python peopleCounter.py -c # Attempts to detect people using webcam
'''

# Pre-trained HOG descriptor with OpenCV's default linear-SVM people model;
# built once at import time and shared by all detector() calls.
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# Module-level verbosity flag, intended to be enabled by the --verbose option.
VERBOSITY = False
  16. def detector(image):
  17. '''
  18. @image is a numpy array
  19. '''
  20. clone = image.copy()
  21. (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4), padding=(8, 8), scale=1.05)
  22. # draw the original bounding boxes
  23. for (x, y, w, h) in rects:
  24. cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 0, 255), 2)
  25. # Applies non-max supression from imutils package to kick-off overlapped
  26. # boxes
  27. rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
  28. result = non_max_suppression(rects, probs=None, overlapThresh=0.65)
  29. return result
  30. def argsParser():
  31. ap = argparse.ArgumentParser()
  32. ap.add_argument("-i", "--image", default=None,
  33. help="path to image test file directory")
  34. ap.add_argument("-c", "--camera", action="store_true", default=False,
  35. help="Set as true if you wish to use the camera")
  36. ap.add_argument("-v", "--video", default=None,
  37. help="path to the video file")
  38. ap.add_argument("-r", "--remote", action="store_true", default=False,
  39. help="video comes from remote source via imagezmq")
  40. ap.add_argument("--verbose", action="store_true", default=False,
  41. help="increase output verbosity")
  42. args = vars(ap.parse_args())
  43. if args["verbose"]:
  44. VERBOSITY = True
  45. return args
  46. def usage():
  47. print("usage: counter_people.py [-h] [-i IMAGE] [-c] [-v] [-r REMOTE] [--verbose]")
  48. print()
  49. print("optional arguments:")
  50. print(" -h, --help show this help message and exit")
  51. print(" -i IMAGE, --image IMAGE")
  52. print(" path to image test file directory")
  53. print(" -c, --camera Set as true if you wish to use the camera")
  54. print(" -v, --video path to the video file")
  55. print(" -r REMOTE, --remote REMOTE")
  56. print(" video comes from remote source via imagezmq")
  57. print(" --verbose increase output verbosity")
  58. def localDetect(image_path):
  59. result = []
  60. image = cv2.imread(image_path)
  61. image = imutils.resize(image, width=min(400, image.shape[1]))
  62. if len(image) <= 0:
  63. print("[ERROR] could not read local image")
  64. return result
  65. print("[INFO] Detecting people")
  66. result = detector(image)
  67. if VERBOSITY:
  68. # shows the result
  69. for (xA, yA, xB, yB) in result:
  70. cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
  71. cv2.imshow("result", image)
  72. cv2.waitKey(0)
  73. cv2.destroyWindow("result")
  74. #cv2.imwrite("result.png", np.hstack((clone, image)))
  75. return result#(result, image)
  76. def videoDetect(cap):
  77. while True:
  78. # Capture frame-by-frame
  79. _, frame = cap.read()
  80. if frame is None:
  81. break
  82. frame = imutils.resize(frame, width=min(400, frame.shape[1]))
  83. result = detector(frame.copy())
  84. # shows the result
  85. for (xA, yA, xB, yB) in result:
  86. cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
  87. if VERBOSITY:
  88. cv2.imshow('frame', frame)
  89. cv2.waitKey(0)
  90. #if time.time() - init >= sample_time:
  91. if result:
  92. print("{} people detected.".format(len(result)))
  93. #init = time.time()
  94. if cv2.waitKey(1) & 0xFF == ord('q'):
  95. break
  96. # When everything done, release the capture
  97. cap.release()
  98. cv2.destroyAllWindows()
  99. def remoteDetect(image_hub):
  100. while True:
  101. rpi_name, image = image_hub.recv_image()
  102. cv2.imshow(rpi_name, image) # 1 window for each RPi
  103. cv2.waitKey(1)
  104. image_hub.send_reply(b'OK')
  105. def detectPeople(args):
  106. image_path = args["image"]
  107. video_path = args["video"]
  108. camera = True if args["camera"] else False
  109. remote = True if args["remote"] else False
  110. # Routine to read local image
  111. if image_path is not None:
  112. print("[INFO] Image path provided, attempting to read image")
  113. (result, image) = localDetect(image_path)
  114. print(str(len(result)) + " People detected.")
  115. elif video_path is not None:
  116. print("[INFO] Video path provided, reading video")
  117. cap = cv2.VideoCapture(video_path)
  118. videoDetect(cap)
  119. # Routine to read images from webcam
  120. elif camera:
  121. print("[INFO] Reading images from local camera")
  122. cap = cv2.VideoCapture(0)
  123. videoDetect(cap)
  124. elif remote:
  125. print("[INFO] Reading images from remote stream")
  126. image_hub = imagezmq.ImageHub()
  127. remoteDetect(image_hub)
  128. else:
  129. usage()
  130. def main():
  131. args = argsParser()
  132. detectPeople(args)
  133. if __name__ == '__main__':
  134. main()