Development of an internal social media platform with personalised dashboards for students
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

gthread.py 12KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371
  1. # -*- coding: utf-8 -
  2. #
  3. # This file is part of gunicorn released under the MIT license.
  4. # See the NOTICE for more information.
  5. # design:
  6. # a threaded worker accepts connections in the main loop, accepted
  7. # connections are added to the thread pool as a connection job. On
  8. # keepalive, connections are put back in the loop waiting for an event.
  9. # If no event happens after the keep alive timeout, the connection is
  10. # closed.
  11. from collections import deque
  12. from datetime import datetime
  13. import errno
  14. from functools import partial
  15. import os
  16. import socket
  17. import ssl
  18. import sys
  19. from threading import RLock
  20. import time
  21. from .. import http
  22. from ..http import wsgi
  23. from .. import util
  24. from . import base
  25. from .. import six
  26. try:
  27. import concurrent.futures as futures
  28. except ImportError:
  29. raise RuntimeError("""
  30. You need to install the 'futures' package to use this worker with this
  31. Python version.
  32. """)
  33. try:
  34. from asyncio import selectors
  35. except ImportError:
  36. from gunicorn import selectors
  37. class TConn(object):
  38. def __init__(self, cfg, sock, client, server):
  39. self.cfg = cfg
  40. self.sock = sock
  41. self.client = client
  42. self.server = server
  43. self.timeout = None
  44. self.parser = None
  45. # set the socket to non blocking
  46. self.sock.setblocking(False)
  47. def init(self):
  48. self.sock.setblocking(True)
  49. if self.parser is None:
  50. # wrap the socket if needed
  51. if self.cfg.is_ssl:
  52. self.sock = ssl.wrap_socket(self.sock, server_side=True,
  53. **self.cfg.ssl_options)
  54. # initialize the parser
  55. self.parser = http.RequestParser(self.cfg, self.sock)
  56. def set_timeout(self):
  57. # set the timeout
  58. self.timeout = time.time() + self.cfg.keepalive
  59. def close(self):
  60. util.close(self.sock)
  61. def __lt__(self, other):
  62. return self.timeout < other.timeout
  63. __cmp__ = __lt__
  64. class ThreadWorker(base.Worker):
  65. def __init__(self, *args, **kwargs):
  66. super(ThreadWorker, self).__init__(*args, **kwargs)
  67. self.worker_connections = self.cfg.worker_connections
  68. self.max_keepalived = self.cfg.worker_connections - self.cfg.threads
  69. # initialise the pool
  70. self.tpool = None
  71. self.poller = None
  72. self._lock = None
  73. self.futures = deque()
  74. self._keep = deque()
  75. self.nr_conns = 0
  76. @classmethod
  77. def check_config(cls, cfg, log):
  78. max_keepalived = cfg.worker_connections - cfg.threads
  79. if max_keepalived <= 0 and cfg.keepalive:
  80. log.warning("No keepalived connections can be handled. " +
  81. "Check the number of worker connections and threads.")
    def init_process(self):
        # Created here rather than in __init__ so the thread pool, the
        # poller and the lock all belong to the forked worker process.
        self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
        self.poller = selectors.DefaultSelector()
        self._lock = RLock()
        super(ThreadWorker, self).init_process()
    def handle_quit(self, sig, frame):
        # Signal handler: stop the main loop, run the user hook, then exit.
        self.alive = False
        # worker_int callback
        self.cfg.worker_int(self)
        # do not wait for queued jobs: shutdown(False) returns immediately
        self.tpool.shutdown(False)
        # give running callbacks a brief moment before the hard exit
        time.sleep(0.1)
        sys.exit(0)
    def _wrap_future(self, fs, conn):
        # Attach the connection to the future so finish_request() can
        # still reach it when the job was cancelled or raised.
        fs.conn = conn
        self.futures.append(fs)
        fs.add_done_callback(self.finish_request)
    def enqueue_req(self, conn):
        # switch the socket back to blocking mode and (lazily) build the
        # parser / SSL wrapper before handing it to a thread
        conn.init()
        # submit the connection to a worker
        fs = self.tpool.submit(self.handle, conn)
        self._wrap_future(fs, conn)
  103. def accept(self, server, listener):
  104. try:
  105. sock, client = listener.accept()
  106. # initialize the connection object
  107. conn = TConn(self.cfg, sock, client, server)
  108. self.nr_conns += 1
  109. # enqueue the job
  110. self.enqueue_req(conn)
  111. except EnvironmentError as e:
  112. if e.errno not in (errno.EAGAIN,
  113. errno.ECONNABORTED, errno.EWOULDBLOCK):
  114. raise
    def reuse_connection(self, conn, client):
        # Poller callback: a parked keepalive socket became readable.
        # Pull it out of the keepalive set and requeue it as a job.
        with self._lock:
            # unregister the client from the poller
            self.poller.unregister(client)
            # remove the connection from keepalive
            try:
                self._keep.remove(conn)
            except ValueError:
                # race condition: murder_keepalived() reaped it first
                return

        # submit the connection to a worker
        self.enqueue_req(conn)
    def murder_keepalived(self):
        """Close keepalive connections whose deadline has passed.

        ``self._keep`` is ordered oldest-deadline-first, so the scan can
        stop at the first connection that has not expired yet.  The lock
        is taken per-step (not around the whole loop) to avoid starving
        the callbacks that also touch ``_keep`` and the poller.
        """
        now = time.time()
        while True:
            with self._lock:
                try:
                    # remove the connection from the queue
                    conn = self._keep.popleft()
                except IndexError:
                    break

            delta = conn.timeout - now
            if delta > 0:
                # add the connection back to the queue
                with self._lock:
                    self._keep.appendleft(conn)

                break
            else:
                self.nr_conns -= 1
                # remove the socket from the poller
                with self._lock:
                    try:
                        self.poller.unregister(conn.sock)
                    except EnvironmentError as e:
                        # EBADF: socket already gone, nothing to unregister
                        if e.errno != errno.EBADF:
                            raise
                    except KeyError:
                        # already removed by the system, continue
                        pass

                # close the socket
                conn.close()
  156. def is_parent_alive(self):
  157. # If our parent changed then we shut down.
  158. if self.ppid != os.getppid():
  159. self.log.info("Parent changed, shutting down: %s", self)
  160. return False
  161. return True
    def run(self):
        """Main worker loop: accept, dispatch, reap, repeat.

        Listeners are watched by the poller; accepted connections run in
        the thread pool and are recycled through the poller while kept
        alive.  The loop exits when the worker is told to stop or its
        parent disappears.
        """
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, mask in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                                      return_when=futures.FIRST_COMPLETED)
            else:
                # at capacity: wait for a request to finish instead of
                # accepting new work
                result = futures.wait(self.futures, timeout=1.0,
                                      return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        # shutdown: stop the pool and the poller, close the listeners,
        # then give in-flight requests up to graceful_timeout to finish
        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
  200. def finish_request(self, fs):
  201. if fs.cancelled():
  202. fs.conn.close()
  203. return
  204. try:
  205. (keepalive, conn) = fs.result()
  206. # if the connection should be kept alived add it
  207. # to the eventloop and record it
  208. if keepalive:
  209. # flag the socket as non blocked
  210. conn.sock.setblocking(False)
  211. # register the connection
  212. conn.set_timeout()
  213. with self._lock:
  214. self._keep.append(conn)
  215. # add the socket to the event loop
  216. self.poller.register(conn.sock, selectors.EVENT_READ,
  217. partial(self.reuse_connection, conn))
  218. else:
  219. self.nr_conns -= 1
  220. conn.close()
  221. except:
  222. # an exception happened, make sure to close the
  223. # socket.
  224. self.nr_conns -= 1
  225. fs.conn.close()
    def handle(self, conn):
        """Process one request on ``conn`` inside a worker thread.

        Returns ``(keepalive, conn)``; ``keepalive`` tells
        finish_request() whether to recycle the connection or close it.
        All expected network/SSL failures are handled here so the future
        completes normally.
        """
        keepalive = False
        req = None
        try:
            req = six.next(conn.parser)
            if not req:
                # client closed without sending a request
                return (False, conn)

            # handle the request
            keepalive = self.handle_request(req, conn)
            if keepalive:
                return (keepalive, conn)
        except http.errors.NoMoreData as e:
            self.log.debug("Ignored premature client disconnection. %s", e)
        except StopIteration as e:
            # raised by handle_request() after an error on a connection
            # whose headers were already sent
            self.log.debug("Closing connection. %s", e)
        except ssl.SSLError as e:
            if e.args[0] == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                conn.sock.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, conn.sock, conn.client, e)
        except EnvironmentError as e:
            if e.errno not in (errno.EPIPE, errno.ECONNRESET):
                self.log.exception("Socket error processing request.")
            else:
                # broken pipe / reset by peer: routine client behaviour
                if e.errno == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                else:
                    self.log.debug("Ignoring connection epipe")
        except Exception as e:
            self.handle_error(req, conn.sock, conn.client, e)

        return (False, conn)
    def handle_request(self, req, conn):
        """Run the WSGI application for ``req`` and send the response.

        Returns True when the connection may be kept alive, False when
        it must be closed.  EnvironmentError propagates to handle() for
        socket-level handling; other errors after headers were sent
        force-close the socket and raise StopIteration.
        """
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            request_start = datetime.now()
            resp, environ = wsgi.create(req, conn.sock, conn.client,
                                        conn.server, self.cfg)
            environ["wsgi.multithread"] = True
            self.nr += 1
            # max_requests reached: finish this request, then restart
            if self.alive and self.nr >= self.max_requests:
                self.log.info("Autorestarting worker after current request.")
                resp.force_close()
                self.alive = False

            if not self.cfg.keepalive:
                resp.force_close()
            elif len(self._keep) >= self.max_keepalived:
                # no room left to park another keepalive connection
                resp.force_close()

            respiter = self.wsgi(environ, resp.start_response)
            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)

                resp.close()
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
            finally:
                # per WSGI, always close the app's response iterable
                if hasattr(respiter, "close"):
                    respiter.close()

            if resp.should_close():
                self.log.debug("Closing connection.")
                return False
        except EnvironmentError:
            # pass to next try-except level
            six.reraise(*sys.exc_info())
        except Exception:
            if resp and resp.headers_sent:
                # If the requests have already been sent, we should close the
                # connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    conn.sock.shutdown(socket.SHUT_RDWR)
                    conn.sock.close()
                except EnvironmentError:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")

        return True