Smart home using the example of presence detection in a room. Project work by Lennart Heimbs, Johannes Krug, Sebastian Dohle and Kevin Holzschuh, supervised by Prof. Oliver Hofmann, summer semester 2019 (SS2019).

counter_people.py 4.5KB

from imutils.object_detection import non_max_suppression
import numpy as np
import imutils
import cv2
import requests
import time
import argparse
import base64

'''
Usage:
python counter_people.py -i PATH_TO_IMAGE  # Reads and detects people in a single locally stored image
python counter_people.py -c                # Attempts to detect people using the webcam
'''

# Placeholder Ubidots credentials; fill these in before enabling the upload code below.
TOKEN = ""     # Ubidots account token
DEVICE = ""    # Ubidots device label
VARIABLE = ""  # Ubidots variable label

# Initialize the HOG descriptor with OpenCV's default people detector.
HOGCV = cv2.HOGDescriptor()
HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())


def detector(image):
    '''
    @image is a numpy array
    '''
    clone = image.copy()
    (rects, weights) = HOGCV.detectMultiScale(image, winStride=(4, 4),
                                              padding=(8, 8), scale=1.05)

    # Draw the original bounding boxes
    for (x, y, w, h) in rects:
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 0, 255), 2)

    # Apply non-max suppression from the imutils package to discard overlapping boxes
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    result = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    return result


def buildPayload(variable, value, context):
    return {variable: {"value": value, "context": context}}


def argsParser():
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", default=None,
                    help="path to image test file directory")
    ap.add_argument("-c", "--camera", default=False,
                    help="Set as true if you wish to use the camera")
    args = vars(ap.parse_args())
    return args


def localDetect(image_path):
    result = []
    image = cv2.imread(image_path)
    if image is None:
        print("[ERROR] could not read local image")
        return (result, image)

    image = imutils.resize(image, width=min(400, image.shape[1]))
    clone = image.copy()

    print("[INFO] Detecting people")
    result = detector(image)

    """# shows the result
    for (xA, yA, xB, yB) in result:
        cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)

    cv2.imshow("result", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite("result.png", np.hstack((clone, image)))"""

    return (result, image)


def cameraDetect(token, device, variable, sample_time=5):
    cap = cv2.VideoCapture(0)
    init = time.time()

    # Allowed sample time for Ubidots is 1 dot/second
    if sample_time < 1:
        sample_time = 1

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        frame = imutils.resize(frame, width=min(400, frame.shape[1]))
        result = detector(frame.copy())

        # shows the result
        #for (xA, yA, xB, yB) in result:
        #    cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
        #cv2.imshow('frame', frame)

        # Sends results
        if time.time() - init >= sample_time:
            #print("[INFO] Sending actual frame results")
            # Converts the image to base 64 and adds it to the context
            #b64 = convert_to_base64(frame)
            #context = {"image": b64}
            if len(result):
                print("{} people detected.".format(len(result)))
            init = time.time()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()


def convert_to_base64(image):
    image = imutils.resize(image, width=400)
    img_str = cv2.imencode('.png', image)[1].tobytes()
    b64 = base64.b64encode(img_str)
    return b64.decode('utf-8')


def detectPeople(args):
    image_path = args["image"]
    camera = True if str(args["camera"]) == 'true' else False

    # Routine to read a local image
    if image_path is not None and not camera:
        print("[INFO] Image path provided, attempting to read image")
        (result, image) = localDetect(image_path)
        if image is None:
            return

        print("[INFO] sending results")
        # Converts the image to base 64 and adds it to the context
        b64 = convert_to_base64(image)
        context = {"image": b64}
        print(len(result))

        # Sends the result
        """req = sendToUbidots(TOKEN, DEVICE, VARIABLE,
                               len(result), context=context)
        if req.status_code >= 400:
            print("[ERROR] Could not send data to Ubidots")
            return req"""

    # Routine to read images from the webcam
    if camera:
        print("[INFO] reading camera images")
        cameraDetect(TOKEN, DEVICE, VARIABLE)


def main():
    args = argsParser()
    detectPeople(args)


if __name__ == '__main__':
    main()
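The commented-out upload block references sendToUbidots(TOKEN, DEVICE, VARIABLE, ...), but no such function is defined in this file, even though requests is imported and buildPayload() is defined but unused. The following is a minimal sketch of what such a helper could look like if it lived in the same module; the endpoint URL and the X-Auth-Token header are assumptions based on Ubidots' public v1.6 HTTP API, not part of the original project code.

# Hypothetical helper, not part of the original counter_people.py.
# Posts the detection count to Ubidots using the module's requests import
# and the buildPayload() helper defined above. The URL and auth header are
# assumptions based on the public Ubidots v1.6 HTTP API.
def sendToUbidots(token, device, variable, value, context=None, retries=5):
    url = "https://industrial.api.ubidots.com/api/v1.6/devices/{}".format(device)
    headers = {"X-Auth-Token": token, "Content-Type": "application/json"}
    payload = buildPayload(variable, value, context or {})

    response = None
    for _ in range(retries):
        response = requests.post(url, headers=headers, json=payload)
        if response.status_code < 400:
            break
        time.sleep(1)  # back off briefly before retrying a failed request
    return response

With a helper along these lines in place, the triple-quoted block in detectPeople() could simply be uncommented, since it already checks req.status_code for errors.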