Development of an internal social media platform with personalised dashboards for students

common.py 12KB

  1. """
  2. kombu.common
  3. ============
  4. Common Utilities.
  5. """
  6. from __future__ import absolute_import
  7. import os
  8. import socket
  9. import threading
  10. from collections import deque
  11. from contextlib import contextmanager
  12. from functools import partial
  13. from itertools import count
  14. from uuid import uuid4, uuid3, NAMESPACE_OID
  15. from amqp import RecoverableConnectionError
  16. from .entity import Exchange, Queue
  17. from .five import range
  18. from .log import get_logger
  19. from .serialization import registry as serializers
  20. from .utils import uuid
  21. try:
  22. from _thread import get_ident
  23. except ImportError: # pragma: no cover
  24. try: # noqa
  25. from thread import get_ident # noqa
  26. except ImportError: # pragma: no cover
  27. from dummy_thread import get_ident # noqa
  28. __all__ = ['Broadcast', 'maybe_declare', 'uuid',
  29. 'itermessages', 'send_reply',
  30. 'collect_replies', 'insured', 'drain_consumer',
  31. 'eventloop']
  32. #: Prefetch count can't exceed short.
  33. PREFETCH_COUNT_MAX = 0xFFFF
  34. logger = get_logger(__name__)
  35. _node_id = None
  36. def get_node_id():
  37. global _node_id
  38. if _node_id is None:
  39. _node_id = uuid4().int
  40. return _node_id
  41. def generate_oid(node_id, process_id, thread_id, instance):
  42. ent = '%x-%x-%x-%x' % (node_id, process_id, thread_id, id(instance))
  43. return str(uuid3(NAMESPACE_OID, ent))
  44. def oid_from(instance):
  45. return generate_oid(get_node_id(), os.getpid(), get_ident(), instance)
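

# Illustrative sketch (the ``Producer`` class below is made up, not part of
# this module): ``oid_from`` derives a deterministic UUIDv3 from the node id,
# pid, thread id and the object's ``id()``, so the same live object always
# maps to the same oid within one process and thread:
#
#     class Producer(object):
#         pass
#
#     p = Producer()
#     assert oid_from(p) == oid_from(p)          # stable for the same object
#     assert oid_from(Producer()) != oid_from(p)  # distinct live objects differ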


class Broadcast(Queue):
    """Convenience class used to define broadcast queues.

    Every queue instance will have a unique name,
    and both the queue and exchange are configured with auto deletion.

    :keyword name: This is used as the name of the exchange.
    :keyword queue: By default a unique id is used for the queue
        name for every consumer.  You can specify a custom queue
        name here.
    :keyword \*\*kwargs: See :class:`~kombu.Queue` for a list
        of additional keyword arguments supported.

    """
    attrs = Queue.attrs + (('queue', None),)

    def __init__(self, name=None, queue=None, auto_delete=True,
                 exchange=None, alias=None, **kwargs):
        queue = queue or 'bcast.%s' % (uuid(),)
        return super(Broadcast, self).__init__(
            alias=alias or name,
            queue=queue,
            name=queue,
            auto_delete=auto_delete,
            exchange=(exchange if exchange is not None
                      else Exchange(name, type='fanout')),
            **kwargs
        )
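

# Usage sketch (the exchange name and ``handle`` callback are hypothetical):
# every consumer that declares the same ``Broadcast`` gets its own
# auto-deleted queue bound to one shared fanout exchange, so each consumer
# receives a copy of every message:
#
#     from kombu import Connection
#
#     def handle(body, message):      # hypothetical callback
#         message.ack()
#
#     bcast = Broadcast('news')       # fanout exchange 'news', unique queue
#     with Connection('amqp://') as conn:
#         with conn.Consumer([bcast], callbacks=[handle]):
#             conn.drain_events(timeout=1)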


def declaration_cached(entity, channel):
    return entity in channel.connection.client.declared_entities


def maybe_declare(entity, channel=None, retry=False, **retry_policy):
    is_bound = entity.is_bound
    if not is_bound:
        assert channel
        entity = entity.bind(channel)
    if channel is None:
        assert is_bound
        channel = entity.channel
    declared = ident = None
    if channel.connection and entity.can_cache_declaration:
        declared = channel.connection.client.declared_entities
        ident = hash(entity)
        if ident in declared:
            return False
    if retry:
        return _imaybe_declare(entity, declared, ident,
                               channel, **retry_policy)
    return _maybe_declare(entity, declared, ident, channel)


def _maybe_declare(entity, declared, ident, channel):
    channel = channel or entity.channel
    if not channel.connection:
        raise RecoverableConnectionError('channel disconnected')
    entity.declare()
    if declared is not None and ident:
        declared.add(ident)
    return True


def _imaybe_declare(entity, declared, ident, channel, **retry_policy):
    return entity.channel.connection.client.ensure(
        entity, _maybe_declare, **retry_policy)(
            entity, declared, ident, channel)
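

# Example sketch (the exchange name is made up): declarations are cached per
# connection, so only the first call performs broker I/O; repeat calls for a
# cacheable entity return False.  Pass ``retry=True`` plus retry-policy
# keyword arguments to retry the declaration on recoverable errors:
#
#     from kombu import Connection, Exchange
#
#     with Connection('amqp://') as conn:
#         channel = conn.channel()
#         exchange = Exchange('logs', type='fanout')
#         maybe_declare(exchange, channel)   # declares -> True
#         maybe_declare(exchange, channel)   # cache hit -> False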


def drain_consumer(consumer, limit=1, timeout=None, callbacks=None):
    acc = deque()

    def on_message(body, message):
        acc.append((body, message))

    consumer.callbacks = [on_message] + (callbacks or [])

    with consumer:
        for _ in eventloop(consumer.channel.connection.client,
                           limit=limit, timeout=timeout,
                           ignore_timeouts=True):
            try:
                yield acc.popleft()
            except IndexError:
                pass


def itermessages(conn, channel, queue, limit=1, timeout=None,
                 callbacks=None, **kwargs):
    return drain_consumer(
        conn.Consumer(queues=[queue], channel=channel, **kwargs),
        limit=limit, timeout=timeout, callbacks=callbacks,
    )
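

# Sketch (the queue name is hypothetical): consume up to ``limit`` messages,
# giving up after ``timeout`` seconds of silence.  Timeouts inside the loop
# are ignored; the limit bounds the number of drain iterations:
#
#     from kombu import Connection, Queue
#
#     with Connection('amqp://') as conn:
#         channel = conn.channel()
#         for body, message in itermessages(conn, channel, Queue('tasks'),
#                                           limit=10, timeout=1):
#             message.ack()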


def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False):
    """Best practice generator wrapper around ``Connection.drain_events``.

    Able to drain events forever, with a limit, and optionally ignoring
    timeout errors (a timeout of 1 is often used in environments where
    the socket can get "stuck", and is a best practice for Kombu consumers).

    **Examples**

    ``eventloop`` is a generator::

        from kombu.common import eventloop

        def run(connection):
            it = eventloop(connection, timeout=1, ignore_timeouts=True)
            next(it)   # one event consumed, or timed out.

            for _ in eventloop(connection, timeout=1, ignore_timeouts=True):
                pass  # loop forever.

    It also takes an optional limit parameter, and timeout errors
    are propagated by default::

        for _ in eventloop(connection, limit=1, timeout=1):
            pass

    .. seealso::

        :func:`itermessages`, which is an event loop bound to one or more
        consumers, that yields any messages received.

    """
    for i in limit and range(limit) or count():
        try:
            yield conn.drain_events(timeout=timeout)
        except socket.timeout:
            if timeout and not ignore_timeouts:  # pragma: no cover
                raise


def send_reply(exchange, req, msg,
               producer=None, retry=False, retry_policy=None, **props):
    """Send reply for request.

    :param exchange: Reply exchange
    :param req: Original request, a message with a ``reply_to`` property.
    :param msg: The reply message body.
    :param producer: Producer instance
    :param retry: If true, retry according to the ``retry_policy`` argument.
    :param retry_policy: Retry settings.
    :param props: Extra properties

    """
    producer.publish(
        msg, exchange=exchange,
        retry=retry, retry_policy=retry_policy,
        **dict({'routing_key': req.properties['reply_to'],
                'correlation_id': req.properties.get('correlation_id'),
                'serializer': serializers.type_to_name[req.content_type],
                'content_encoding': req.content_encoding}, **props)
    )
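

# Sketch (``producer`` and ``handle`` are hypothetical; ``request`` is a
# received message carrying a ``reply_to`` property): the reply reuses the
# request's correlation id, serializer and content encoding:
#
#     def on_request(body, request):
#         result = handle(body)
#         send_reply(Exchange('replies'), request, result,
#                    producer=producer, retry=True,
#                    retry_policy={'max_retries': 3})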


def collect_replies(conn, channel, queue, *args, **kwargs):
    """Generator collecting replies from ``queue``."""
    no_ack = kwargs.setdefault('no_ack', True)
    received = False
    try:
        for body, message in itermessages(conn, channel, queue,
                                          *args, **kwargs):
            if not no_ack:
                message.ack()
            received = True
            yield body
    finally:
        if received:
            channel.after_reply_message_received(queue.name)
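

# Sketch pairing with ``send_reply`` above (``reply_queue`` is hypothetical):
# iterate reply bodies as they arrive; with the default ``no_ack=True`` the
# messages need no explicit acknowledgement:
#
#     for body in collect_replies(conn, conn.channel(), reply_queue,
#                                 limit=5, timeout=1):
#         print('got reply: %r' % (body,))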


def _ensure_errback(exc, interval):
    logger.error(
        'Connection error: %r. Retry in %ss\n', exc, interval,
        exc_info=True,
    )


@contextmanager
def _ignore_errors(conn):
    try:
        yield
    except conn.connection_errors + conn.channel_errors:
        pass


def ignore_errors(conn, fun=None, *args, **kwargs):
    """Ignore connection and channel errors.

    The first argument must be a connection object, or any other object
    with ``connection_errors`` and ``channel_errors`` attributes.

    Can be used as a function:

    .. code-block:: python

        def example(connection):
            ignore_errors(connection, consumer.channel.close)

    or as a context manager:

    .. code-block:: python

        def example(connection):
            with ignore_errors(connection):
                consumer.channel.close()

    .. note::

        Connection and channel errors should be properly handled,
        and not ignored.  Using this function is only acceptable in a
        cleanup phase, like when a connection is lost or at shutdown.

    """
    if fun:
        with _ignore_errors(conn):
            return fun(*args, **kwargs)
    return _ignore_errors(conn)


def revive_connection(connection, channel, on_revive=None):
    if on_revive:
        on_revive(channel)


def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts):
    """Ensures function performing broker commands completes
    despite intermittent connection failures."""
    errback = errback or _ensure_errback

    with pool.acquire(block=True) as conn:
        conn.ensure_connection(errback=errback)
        # we cache the channel for subsequent calls, this has to be
        # reset on revival.
        channel = conn.default_channel
        revive = partial(revive_connection, conn, on_revive=on_revive)
        insured = conn.autoretry(fun, channel, errback=errback,
                                 on_revive=revive, **opts)
        retval, _ = insured(*args, **dict(kwargs, connection=conn))
        return retval
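

# Sketch (``publish_report`` and its payload are hypothetical; assumes a
# kombu connection pool).  ``Connection.autoretry`` invokes ``fun`` with
# ``channel`` and ``connection`` keyword arguments, so the wrapped function
# must accept both:
#
#     from kombu import Connection
#     from kombu.pools import connections
#
#     def publish_report(data, channel=None, connection=None):
#         ...  # broker commands here are retried on recoverable errors
#
#     pool = connections[Connection('amqp://')]
#     result = insured(pool, publish_report, ({'total': 42},), {})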


class QoS(object):
    """Thread safe increment/decrement of a channel's prefetch_count.

    :param callback: Function used to set new prefetch count,
        e.g. ``consumer.qos`` or ``channel.basic_qos``.  Will be called
        with a single ``prefetch_count`` keyword argument.
    :param initial_value: Initial prefetch count value.

    **Example usage**

    .. code-block:: python

        >>> from kombu import Consumer, Connection
        >>> connection = Connection('amqp://')
        >>> consumer = Consumer(connection)
        >>> qos = QoS(consumer.qos, initial_value=2)
        >>> qos.update()  # set initial

        >>> qos.value
        2

        >>> def in_some_thread():
        ...     qos.increment_eventually()

        >>> def in_some_other_thread():
        ...     qos.decrement_eventually()

        >>> while 1:
        ...     if qos.prev != qos.value:
        ...         qos.update()  # prefetch changed so update.

    It can be used with any function supporting a ``prefetch_count`` keyword
    argument::

        >>> channel = connection.channel()
        >>> QoS(channel.basic_qos, 10)

        >>> def set_qos(prefetch_count):
        ...     print('prefetch count now: %r' % (prefetch_count, ))
        >>> QoS(set_qos, 10)

    """
    prev = None

    def __init__(self, callback, initial_value):
        self.callback = callback
        self._mutex = threading.RLock()
        self.value = initial_value or 0

    def increment_eventually(self, n=1):
        """Increment the value, but do not update the channel's QoS.

        The MainThread will be responsible for calling :meth:`update`
        when necessary.

        """
        with self._mutex:
            if self.value:
                self.value = self.value + max(n, 0)
        return self.value

    def decrement_eventually(self, n=1):
        """Decrement the value, but do not update the channel's QoS.

        The MainThread will be responsible for calling :meth:`update`
        when necessary.

        """
        with self._mutex:
            if self.value:
                self.value -= n
                if self.value < 1:
                    self.value = 1
        return self.value

    def set(self, pcount):
        """Set channel prefetch_count setting."""
        if pcount != self.prev:
            new_value = pcount
            if pcount > PREFETCH_COUNT_MAX:
                logger.warn('QoS: Disabled: prefetch_count exceeds %r',
                            PREFETCH_COUNT_MAX)
                new_value = 0
            logger.debug('basic.qos: prefetch_count->%s', new_value)
            self.callback(prefetch_count=new_value)
        self.prev = pcount
        return pcount

    def update(self):
        """Update prefetch count with current value."""
        with self._mutex:
            return self.set(self.value)