Development of an internal social media platform with personalised dashboards for students

amqp.py 11KB

# -*- coding: utf-8 -*-
"""
    celery.backends.amqp
    ~~~~~~~~~~~~~~~~~~~~

    The AMQP result backend.

    This backend publishes results as messages.

"""
from __future__ import absolute_import

import socket

from collections import deque
from operator import itemgetter

from kombu import Exchange, Queue, Producer, Consumer

from celery import states
from celery.exceptions import TimeoutError
from celery.five import range, monotonic
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_s_to_ms

from .base import BaseBackend

__all__ = ['BacklogLimitExceeded', 'AMQPBackend']

logger = get_logger(__name__)

class BacklogLimitExceeded(Exception):
    """Too much state history to fast-forward."""


def repair_uuid(s):
    # Historically the dashes in UUIDs are removed from AMQ entity names,
    # but there is no known reason to.  Hopefully we'll be able to fix
    # this in v4.0.
    return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
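# For illustration, with a hypothetical 32-character hex id:
#   repair_uuid('2dcfa4e7cbf64a0d9b8b2b7a6f1f3c4d')
#   -> '2dcfa4e7-cbf6-4a0d-9b8b-2b7a6f1f3c4d'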


class NoCacheQueue(Queue):
    can_cache_declaration = False


class AMQPBackend(BaseBackend):
    """Publishes results by sending messages."""
    Exchange = Exchange
    Queue = NoCacheQueue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    persistent = True
    supports_autoexpire = True
    supports_native_join = True
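
    # kombu retry policy used when publishing results: up to 20 attempts,
    # the first retry immediate, then roughly one second between retries
    # (interval_step and interval_max of 1s), as configured below.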
    retry_policy = {
        'max_retries': 20,
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 1,
    }

    def __init__(self, app, connection=None, exchange=None,
                 exchange_type=None, persistent=None, serializer=None,
                 auto_delete=True, **kwargs):
        super(AMQPBackend, self).__init__(app, **kwargs)
        conf = self.app.conf
        self._connection = connection
        self.persistent = self.prepare_persistent(persistent)
        self.delivery_mode = 2 if self.persistent else 1
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self._create_exchange(
            exchange, exchange_type, self.delivery_mode,
        )
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        self.expires = None
        if 'expires' not in kwargs or kwargs['expires'] is not None:
            self.expires = self.prepare_expires(kwargs.get('expires'))
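        # 'x-expires' (milliseconds) asks RabbitMQ to delete a result
        # queue that has gone unused for the configured expiry;
        # dictfilter() drops the key entirely when expires is None.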
        self.queue_arguments = dictfilter({
            'x-expires': maybe_s_to_ms(self.expires),
        })

    def _create_exchange(self, name, type='direct', delivery_mode=2):
        return self.Exchange(name=name,
                             type=type,
                             delivery_mode=delivery_mode,
                             durable=self.persistent,
                             auto_delete=False)

    def _create_binding(self, task_id):
        name = self.rkey(task_id)
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def revive(self, channel):
        pass

    def rkey(self, task_id):
        return task_id.replace('-', '')

    def destination_for(self, task_id, request):
        if request:
            return self.rkey(task_id), request.correlation_id or task_id
        return self.rkey(task_id), task_id
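
    # Each task gets its own reply queue whose name and routing key are
    # the dash-stripped uuid (see rkey() above); repair_uuid() reverses
    # that mapping.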

    def store_result(self, task_id, result, status,
                     traceback=None, request=None, **kwargs):
        """Send task return value and status."""
        routing_key, correlation_id = self.destination_for(task_id, request)
        if not routing_key:
            return
        with self.app.amqp.producer_pool.acquire(block=True) as producer:
            producer.publish(
                {'task_id': task_id, 'status': status,
                 'result': self.encode_result(result, status),
                 'traceback': traceback,
                 'children': self.current_task_children(request)},
                exchange=self.exchange,
                routing_key=routing_key,
                correlation_id=correlation_id,
                serializer=self.serializer,
                retry=True, retry_policy=self.retry_policy,
                declare=self.on_reply_declare(task_id),
                delivery_mode=self.delivery_mode,
            )
        return result

    def on_reply_declare(self, task_id):
        return [self._create_binding(task_id)]
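
    # The binding returned here is passed via ``declare=`` in
    # store_result(), so the reply queue exists before the result
    # message is published to it.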

    def wait_for(self, task_id, timeout=None, cache=True,
                 no_ack=True, on_interval=None,
                 READY_STATES=states.READY_STATES,
                 PROPAGATE_STATES=states.PROPAGATE_STATES,
                 **kwargs):
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta['status'] in READY_STATES:
            return cached_meta
        else:
            try:
                return self.consume(task_id, timeout=timeout, no_ack=no_ack,
                                    on_interval=on_interval)
            except socket.timeout:
                raise TimeoutError('The operation timed out.')

    def get_task_meta(self, task_id, backlog_limit=1000):
        # Polling and using basic_get
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()

            prev = latest = acc = None
            for i in range(backlog_limit):  # spool ffwd
                acc = binding.get(
                    accept=self.accept, no_ack=False,
                )
                if not acc:  # no more messages
                    break
                if acc.payload['task_id'] == task_id:
                    prev, latest = latest, acc
                if prev:
                    # backends are not expected to keep history,
                    # so we delete everything except the most recent state.
                    prev.ack()
                    prev = None
            else:
                raise self.BacklogLimitExceeded(task_id)
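
            # The ``else`` clause on the for loop above fires only when
            # backlog_limit messages were read without draining the queue,
            # i.e. there is too much state history to fast-forward.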
            if latest:
                payload = self._cache[task_id] = \
                    self.meta_from_decoded(latest.payload)
                latest.requeue()
                return payload
            else:
                # no new state, use previous
                try:
                    return self._cache[task_id]
                except KeyError:
                    # result probably pending.
                    return {'status': states.PENDING, 'result': None}
    poll = get_task_meta  # XXX compat

    def drain_events(self, connection, consumer,
                     timeout=None, on_interval=None, now=monotonic,
                     wait=None):
        wait = wait or connection.drain_events
        results = {}

        def callback(meta, message):
            if meta['status'] in states.READY_STATES:
                results[meta['task_id']] = self.meta_from_decoded(meta)
        consumer.callbacks[:] = [callback]
        time_start = now()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            try:
                wait(timeout=1)
            except socket.timeout:
                pass
            if on_interval:
                on_interval()
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results
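
    # Waking at least once per second lets drain_events() enforce the
    # overall ``timeout`` and invoke ``on_interval`` even while the
    # queue stays silent.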

    def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
        wait = self.drain_events
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self.Consumer(channel, binding,
                               no_ack=no_ack, accept=self.accept) as consumer:
                while 1:
                    try:
                        return wait(
                            conn, consumer, timeout, on_interval)[task_id]
                    except KeyError:
                        continue

    def _many_bindings(self, ids):
        return [self._create_binding(task_id) for task_id in ids]

    def get_many(self, task_ids, timeout=None, no_ack=True,
                 now=monotonic, getfields=itemgetter('status', 'task_id'),
                 READY_STATES=states.READY_STATES,
                 PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            mark_cached = cached_ids.add
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached['status'] in READY_STATES:
                        yield task_id, cached
                        mark_cached(task_id)
            ids.difference_update(cached_ids)

            results = deque()
            push_result = results.append
            push_cache = self._cache.__setitem__
            decode_result = self.meta_from_decoded

            def on_message(message):
                body = decode_result(message.decode())
                state, uid = getfields(body)
                if state in READY_STATES:
                    push_result(body) \
                        if uid in task_ids else push_cache(uid, body)

            bindings = self._many_bindings(task_ids)
            with self.Consumer(channel, bindings, on_message=on_message,
                               accept=self.accept, no_ack=no_ack):
                wait = conn.drain_events
                popleft = results.popleft
                while ids:
                    wait(timeout=timeout)
                    while results:
                        state = popleft()
                        task_id = state['task_id']
                        ids.discard(task_id)
                        push_cache(task_id, state)
                        yield task_id, state
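
    # get_many() is a generator: it first yields results already cached
    # in a ready state, then consumes all reply queues at once, yielding
    # (task_id, meta) pairs as the remaining tasks complete.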

    def reload_task_result(self, task_id):
        raise NotImplementedError(
            'reload_task_result is not supported by this backend.')

    def reload_group_result(self, task_id):
        """Reload group result, even if it has been previously fetched."""
        raise NotImplementedError(
            'reload_group_result is not supported by this backend.')

    def save_group(self, group_id, result):
        raise NotImplementedError(
            'save_group is not supported by this backend.')

    def restore_group(self, group_id, cache=True):
        raise NotImplementedError(
            'restore_group is not supported by this backend.')

    def delete_group(self, group_id):
        raise NotImplementedError(
            'delete_group is not supported by this backend.')

    def as_uri(self, include_password=True):
        return 'amqp://'

    def __reduce__(self, args=(), kwargs={}):
        kwargs.update(
            connection=self._connection,
            exchange=self.exchange.name,
            exchange_type=self.exchange.type,
            persistent=self.persistent,
            serializer=self.serializer,
            auto_delete=self.auto_delete,
            expires=self.expires,
        )
        return super(AMQPBackend, self).__reduce__(args, kwargs)
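
As a minimal sketch of how a Celery 3.x application might select this backend: passing backend='amqp' (or setting CELERY_RESULT_BACKEND = 'amqp') resolves to the AMQPBackend above. The app name, broker URL, and the add task below are illustrative placeholders, not taken from this repository.

from celery import Celery

# 'amqp' as the result backend resolves to AMQPBackend.
app = Celery('example',
             broker='amqp://guest@localhost//',
             backend='amqp')

@app.task
def add(x, y):
    return x + y

# AsyncResult.get() goes through AMQPBackend.wait_for()/consume() and
# blocks until the result message arrives or the timeout expires:
#     add.delay(2, 2).get(timeout=10)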