Development of an internal social media platform with personalised dashboards for students

result.py 28KB

# -*- coding: utf-8 -*-
"""
    celery.result
    ~~~~~~~~~~~~~

    Task results/state and groups of results.

"""
from __future__ import absolute_import

import time
import warnings

from collections import deque
from contextlib import contextmanager
from copy import copy

from kombu.utils import cached_property
from kombu.utils.compat import OrderedDict

from . import current_app
from . import states
from ._state import _set_task_join_will_block, task_join_will_block
from .app import app_or_default
from .datastructures import DependencyGraph, GraphFormatter
from .exceptions import IncompleteStream, TimeoutError
from .five import items, range, string_t, monotonic
from .utils import deprecated

__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult',
           'EagerResult', 'result_from_tuple']

E_WOULDBLOCK = """\
Never call result.get() within a task!
See http://docs.celeryq.org/en/latest/userguide/tasks.html\
#task-synchronous-subtasks

In Celery 3.2 this will result in an exception being
raised instead of just being a warning.
"""


def assert_will_not_block():
    if task_join_will_block():
        warnings.warn(RuntimeWarning(E_WOULDBLOCK))


@contextmanager
def allow_join_result():
    reset_value = task_join_will_block()
    _set_task_join_will_block(False)
    try:
        yield
    finally:
        _set_task_join_will_block(reset_value)
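

# Illustrative sketch, not part of the upstream module: code that runs inside
# a task and legitimately needs to wait on another result can temporarily
# disable the block detection above.  ``async_result`` is assumed to be any
# AsyncResult-compatible instance obtained elsewhere.
def _example_wait_inside_task(async_result, timeout=10):
    with allow_join_result():
        # get() will not trigger assert_will_not_block() here;
        # task_join_will_block() is restored when the block exits.
        return async_result.get(timeout=timeout)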


class ResultBase(object):
    """Base class for all results"""

    #: Parent result (if part of a chain)
    parent = None


class AsyncResult(ResultBase):
    """Query task state.

    :param id: see :attr:`id`.
    :keyword backend: see :attr:`backend`.

    """
    app = None

    #: Error raised for timeouts.
    TimeoutError = TimeoutError

    #: The task's UUID.
    id = None

    #: The task result backend to use.
    backend = None

    def __init__(self, id, backend=None, task_name=None,
                 app=None, parent=None):
        self.app = app_or_default(app or self.app)
        self.id = id
        self.backend = backend or self.app.backend
        self.task_name = task_name
        self.parent = parent
        self._cache = None

    def as_tuple(self):
        parent = self.parent
        return (self.id, parent and parent.as_tuple()), None
    serializable = as_tuple   # XXX compat

    def forget(self):
        """Forget about (and possibly remove the result of) this task."""
        self._cache = None
        self.backend.forget(self.id)

    def revoke(self, connection=None, terminate=False, signal=None,
               wait=False, timeout=None):
        """Send revoke signal to all workers.

        Any worker receiving the task, or having reserved the
        task, *must* ignore it.

        :keyword terminate: Also terminate the process currently working
            on the task (if any).
        :keyword signal: Name of signal to send to process if terminate.
            Default is TERM.
        :keyword wait: Wait for replies from workers.  Will wait for 1 second
            by default or you can specify a custom ``timeout``.
        :keyword timeout: Time in seconds to wait for replies if ``wait``
            enabled.

        """
        self.app.control.revoke(self.id, connection=connection,
                                terminate=terminate, signal=signal,
                                reply=wait, timeout=timeout)

    def get(self, timeout=None, propagate=True, interval=0.5,
            no_ack=True, follow_parents=True,
            EXCEPTION_STATES=states.EXCEPTION_STATES,
            PROPAGATE_STATES=states.PROPAGATE_STATES):
        """Wait until task is ready, and return its result.

        .. warning::

            Waiting for tasks within a task may lead to deadlocks.
            Please read :ref:`task-synchronous-subtasks`.

        :keyword timeout: How long to wait, in seconds, before the
            operation times out.
        :keyword propagate: Re-raise exception if the task failed.
        :keyword interval: Time to wait (in seconds) before retrying to
            retrieve the result.  Note that this does not have any effect
            when using the amqp result store backend, as it does not
            use polling.
        :keyword no_ack: Enable amqp no ack (automatically acknowledge
            message).  If this is :const:`False` then the message will
            **not be acked**.
        :keyword follow_parents: Reraise any exception raised by parent task.

        :raises celery.exceptions.TimeoutError: if `timeout` is not
            :const:`None` and the result does not arrive within `timeout`
            seconds.

        If the remote call raised an exception then that exception will
        be re-raised.

        """
        assert_will_not_block()
        on_interval = None
        if follow_parents and propagate and self.parent:
            on_interval = self._maybe_reraise_parent_error
            on_interval()

        if self._cache:
            if propagate:
                self.maybe_reraise()
            return self.result

        meta = self.backend.wait_for(
            self.id, timeout=timeout,
            interval=interval,
            on_interval=on_interval,
            no_ack=no_ack,
        )
        if meta:
            self._maybe_set_cache(meta)
            status = meta['status']
            if status in PROPAGATE_STATES and propagate:
                raise meta['result']
            return meta['result']
    wait = get  # deprecated alias to :meth:`get`.

    def _maybe_reraise_parent_error(self):
        for node in reversed(list(self._parents())):
            node.maybe_reraise()

    def _parents(self):
        node = self.parent
        while node:
            yield node
            node = node.parent

    def collect(self, intermediate=False, **kwargs):
        """Iterator, like :meth:`get` will wait for the task to complete,
        but will also follow :class:`AsyncResult` and :class:`ResultSet`
        returned by the task, yielding ``(result, value)`` tuples for each
        result in the tree.

        An example would be having the following tasks:

        .. code-block:: python

            from celery import group
            from proj.celery import app

            @app.task(trail=True)
            def A(how_many):
                return group(B.s(i) for i in range(how_many))()

            @app.task(trail=True)
            def B(i):
                return pow2.delay(i)

            @app.task(trail=True)
            def pow2(i):
                return i ** 2

        Note that the ``trail`` option must be enabled
        so that the list of children is stored in ``result.children``.
        This is the default but enabled explicitly for illustration.

        Calling :meth:`collect` would return:

        .. code-block:: python

            >>> from celery.result import ResultBase
            >>> from proj.tasks import A

            >>> result = A.delay(10)
            >>> [v for v in result.collect()
            ...  if not isinstance(v, (ResultBase, tuple))]
            [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]

        """
        for _, R in self.iterdeps(intermediate=intermediate):
            yield R, R.get(**kwargs)

    def get_leaf(self):
        value = None
        for _, R in self.iterdeps():
            value = R.get()
        return value

    def iterdeps(self, intermediate=False):
        stack = deque([(None, self)])

        while stack:
            parent, node = stack.popleft()
            yield parent, node
            if node.ready():
                stack.extend((node, child) for child in node.children or [])
            else:
                if not intermediate:
                    raise IncompleteStream()

    def ready(self):
        """Returns :const:`True` if the task has been executed.

        If the task is still running, pending, or is waiting
        for retry then :const:`False` is returned.

        """
        return self.state in self.backend.READY_STATES

    def successful(self):
        """Returns :const:`True` if the task executed successfully."""
        return self.state == states.SUCCESS

    def failed(self):
        """Returns :const:`True` if the task failed."""
        return self.state == states.FAILURE

    def maybe_reraise(self):
        if self.state in states.PROPAGATE_STATES:
            raise self.result

    def build_graph(self, intermediate=False, formatter=None):
        graph = DependencyGraph(
            formatter=formatter or GraphFormatter(root=self.id, shape='oval'),
        )
        for parent, node in self.iterdeps(intermediate=intermediate):
            graph.add_arc(node)
            if parent:
                graph.add_edge(parent, node)
        return graph

    def __str__(self):
        """`str(self) -> self.id`"""
        return str(self.id)

    def __hash__(self):
        """`hash(self) -> hash(self.id)`"""
        return hash(self.id)

    def __repr__(self):
        return '<{0}: {1}>'.format(type(self).__name__, self.id)

    def __eq__(self, other):
        if isinstance(other, AsyncResult):
            return other.id == self.id
        elif isinstance(other, string_t):
            return other == self.id
        return NotImplemented

    def __ne__(self, other):
        return not self.__eq__(other)

    def __copy__(self):
        return self.__class__(
            self.id, self.backend, self.task_name, self.app, self.parent,
        )

    def __reduce__(self):
        return self.__class__, self.__reduce_args__()

    def __reduce_args__(self):
        return self.id, self.backend, self.task_name, None, self.parent

    def __del__(self):
        self._cache = None

    @cached_property
    def graph(self):
        return self.build_graph()

    @property
    def supports_native_join(self):
        return self.backend.supports_native_join

    @property
    def children(self):
        return self._get_task_meta().get('children')

    def _maybe_set_cache(self, meta):
        if meta:
            state = meta['status']
            if state == states.SUCCESS or state in states.PROPAGATE_STATES:
                return self._set_cache(meta)
        return meta

    def _get_task_meta(self):
        if self._cache is None:
            return self._maybe_set_cache(self.backend.get_task_meta(self.id))
        return self._cache

    def _set_cache(self, d):
        children = d.get('children')
        if children:
            d['children'] = [
                result_from_tuple(child, self.app) for child in children
            ]
        self._cache = d
        return d

    @property
    def result(self):
        """When the task has been executed, this contains the return value.

        If the task raised an exception, this will be the exception
        instance."""
        return self._get_task_meta()['result']
    info = result

    @property
    def traceback(self):
        """Get the traceback of a failed task."""
        return self._get_task_meta().get('traceback')

    @property
    def state(self):
        """The task's current state.

        Possible values include:

            *PENDING*

                The task is waiting for execution.

            *STARTED*

                The task has been started.

            *RETRY*

                The task is to be retried, possibly because of failure.

            *FAILURE*

                The task raised an exception, or has exceeded the retry limit.
                The :attr:`result` attribute then contains the
                exception raised by the task.

            *SUCCESS*

                The task executed successfully.  The :attr:`result` attribute
                then contains the task's return value.

        """
        return self._get_task_meta()['status']
    status = state

    @property
    def task_id(self):
        """compat alias to :attr:`id`"""
        return self.id

    @task_id.setter  # noqa
    def task_id(self, id):
        self.id = id
BaseAsyncResult = AsyncResult  # for backwards compatibility.


class ResultSet(ResultBase):
    """Working with more than one result.

    :param results: List of result instances.

    """
    app = None

    #: List of results in the set.
    results = None

    def __init__(self, results, app=None, **kwargs):
        self.app = app_or_default(app or self.app)
        self.results = results

    def add(self, result):
        """Add :class:`AsyncResult` as a new member of the set.

        Does nothing if the result is already a member.

        """
        if result not in self.results:
            self.results.append(result)

    def remove(self, result):
        """Remove result from the set; it must be a member.

        :raises KeyError: if the result is not a member.

        """
        if isinstance(result, string_t):
            result = self.app.AsyncResult(result)
        try:
            self.results.remove(result)
        except ValueError:
            raise KeyError(result)

    def discard(self, result):
        """Remove result from the set if it is a member.

        If it is not a member, do nothing.

        """
        try:
            self.remove(result)
        except KeyError:
            pass

    def update(self, results):
        """Update set with the union of itself and an iterable with
        results."""
        self.results.extend(r for r in results if r not in self.results)

    def clear(self):
        """Remove all results from this set."""
        self.results[:] = []  # don't create new list.

    def successful(self):
        """Were all of the tasks successful?

        :returns: :const:`True` if all of the tasks finished
            successfully (i.e. did not raise an exception).

        """
        return all(result.successful() for result in self.results)

    def failed(self):
        """Did any of the tasks fail?

        :returns: :const:`True` if one of the tasks failed
            (i.e., raised an exception).

        """
        return any(result.failed() for result in self.results)

    def maybe_reraise(self):
        for result in self.results:
            result.maybe_reraise()

    def waiting(self):
        """Are any of the tasks incomplete?

        :returns: :const:`True` if one of the tasks is still
            waiting for execution.

        """
        return any(not result.ready() for result in self.results)

    def ready(self):
        """Did all of the tasks complete? (either by success or failure).

        :returns: :const:`True` if all of the tasks have been
            executed.

        """
        return all(result.ready() for result in self.results)

    def completed_count(self):
        """Task completion count.

        :returns: the number of tasks completed.

        """
        return sum(int(result.successful()) for result in self.results)

    def forget(self):
        """Forget about (and possibly remove the result of) all the tasks."""
        for result in self.results:
            result.forget()

    def revoke(self, connection=None, terminate=False, signal=None,
               wait=False, timeout=None):
        """Send revoke signal to all workers for all tasks in the set.

        :keyword terminate: Also terminate the process currently working
            on the task (if any).
        :keyword signal: Name of signal to send to process if terminate.
            Default is TERM.
        :keyword wait: Wait for replies from workers.  Will wait for 1 second
            by default or you can specify a custom ``timeout``.
        :keyword timeout: Time in seconds to wait for replies if ``wait``
            enabled.

        """
        self.app.control.revoke([r.id for r in self.results],
                                connection=connection, timeout=timeout,
                                terminate=terminate, signal=signal, reply=wait)

    def __iter__(self):
        return iter(self.results)

    def __getitem__(self, index):
        """`res[i] -> res.results[i]`"""
        return self.results[index]

    @deprecated('3.2', '3.3')
    def iterate(self, timeout=None, propagate=True, interval=0.5):
        """Deprecated method, use :meth:`get` with a callback argument."""
        elapsed = 0.0
        results = OrderedDict((result.id, copy(result))
                              for result in self.results)

        while results:
            removed = set()
            for task_id, result in items(results):
                if result.ready():
                    yield result.get(timeout=timeout and timeout - elapsed,
                                     propagate=propagate)
                    removed.add(task_id)
                else:
                    if result.backend.subpolling_interval:
                        time.sleep(result.backend.subpolling_interval)
            for task_id in removed:
                results.pop(task_id, None)
            time.sleep(interval)
            elapsed += interval
            if timeout and elapsed >= timeout:
                raise TimeoutError('The operation timed out')

    def get(self, timeout=None, propagate=True, interval=0.5,
            callback=None, no_ack=True):
        """See :meth:`join`

        This is here for API compatibility with :class:`AsyncResult`,
        in addition it uses :meth:`join_native` if available for the
        current result backend.

        """
        return (self.join_native if self.supports_native_join else self.join)(
            timeout=timeout, propagate=propagate,
            interval=interval, callback=callback, no_ack=no_ack)

    def join(self, timeout=None, propagate=True, interval=0.5,
             callback=None, no_ack=True):
        """Gathers the results of all tasks as a list in order.

        .. note::

            This can be an expensive operation for result store
            backends that must resort to polling (e.g. database).

            You should consider using :meth:`join_native` if your backend
            supports it.

        .. warning::

            Waiting for tasks within a task may lead to deadlocks.
            Please see :ref:`task-synchronous-subtasks`.

        :keyword timeout: The number of seconds to wait for results before
                          the operation times out.

        :keyword propagate: If any of the tasks raises an exception, the
                            exception will be re-raised.

        :keyword interval: Time to wait (in seconds) before retrying to
                           retrieve a result from the set.  Note that this
                           does not have any effect when using the amqp
                           result store backend, as it does not use polling.

        :keyword callback: Optional callback to be called for every result
                           received.  Must have signature ``(task_id, value)``.
                           No results will be returned by this function if
                           a callback is specified.  The order of results
                           is also arbitrary when a callback is used.
                           To get access to the result object for a particular
                           id you will have to generate an index first:
                           ``index = {r.id: r for r in gres.results.values()}``
                           Or you can create new result objects on the fly:
                           ``result = app.AsyncResult(task_id)`` (both will
                           take advantage of the backend cache anyway).

        :keyword no_ack: Automatic message acknowledgement (Note that if this
            is set to :const:`False` then the messages *will not be
            acknowledged*).

        :raises celery.exceptions.TimeoutError: if ``timeout`` is not
            :const:`None` and the operation takes longer than ``timeout``
            seconds.

        """
        assert_will_not_block()
        time_start = monotonic()
        remaining = None

        results = []
        for result in self.results:
            remaining = None
            if timeout:
                remaining = timeout - (monotonic() - time_start)
                if remaining <= 0.0:
                    raise TimeoutError('join operation timed out')
            value = result.get(
                timeout=remaining, propagate=propagate,
                interval=interval, no_ack=no_ack,
            )
            if callback:
                callback(result.id, value)
            else:
                results.append(value)
        return results

    def iter_native(self, timeout=None, interval=0.5, no_ack=True):
        """Backend optimized version of :meth:`iterate`.

        .. versionadded:: 2.2

        Note that this does not support collecting the results
        for different task types using different backends.

        This is currently only supported by the amqp, Redis and cache
        result backends.

        """
        results = self.results
        if not results:
            return iter([])
        return self.backend.get_many(
            set(r.id for r in results),
            timeout=timeout, interval=interval, no_ack=no_ack,
        )

    def join_native(self, timeout=None, propagate=True,
                    interval=0.5, callback=None, no_ack=True):
        """Backend optimized version of :meth:`join`.

        .. versionadded:: 2.2

        Note that this does not support collecting the results
        for different task types using different backends.

        This is currently only supported by the amqp, Redis and cache
        result backends.

        """
        assert_will_not_block()
        order_index = None if callback else dict(
            (result.id, i) for i, result in enumerate(self.results)
        )
        acc = None if callback else [None for _ in range(len(self))]
        for task_id, meta in self.iter_native(timeout, interval, no_ack):
            value = meta['result']
            if propagate and meta['status'] in states.PROPAGATE_STATES:
                raise value
            if callback:
                callback(task_id, value)
            else:
                acc[order_index[task_id]] = value
        return acc

    def _failed_join_report(self):
        return (res for res in self.results
                if res.backend.is_cached(res.id) and
                res.state in states.PROPAGATE_STATES)

    def __len__(self):
        return len(self.results)

    def __eq__(self, other):
        if isinstance(other, ResultSet):
            return other.results == self.results
        return NotImplemented

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '<{0}: [{1}]>'.format(type(self).__name__,
                                     ', '.join(r.id for r in self.results))

    @property
    def subtasks(self):
        """Deprecated alias to :attr:`results`."""
        return self.results

    @property
    def supports_native_join(self):
        try:
            return self.results[0].supports_native_join
        except IndexError:
            pass

    @property
    def backend(self):
        return self.app.backend if self.app else self.results[0].backend


class GroupResult(ResultSet):
    """Like :class:`ResultSet`, but with an associated id.

    This type is returned by :class:`~celery.group`, and the
    deprecated TaskSet's :meth:`~celery.task.TaskSet.apply_async` method.

    It enables inspection of the tasks' state and return values as
    a single entity.

    :param id: The id of the group.
    :param results: List of result instances.

    """

    #: The UUID of the group.
    id = None

    #: List/iterator of results in the group
    results = None

    def __init__(self, id=None, results=None, **kwargs):
        self.id = id
        ResultSet.__init__(self, results, **kwargs)

    def save(self, backend=None):
        """Save group-result for later retrieval using :meth:`restore`.

        Example::

            >>> def save_and_restore(result):
            ...     result.save()
            ...     result = GroupResult.restore(result.id)

        """
        return (backend or self.app.backend).save_group(self.id, self)

    def delete(self, backend=None):
        """Remove this result if it was previously saved."""
        (backend or self.app.backend).delete_group(self.id)

    def __reduce__(self):
        return self.__class__, self.__reduce_args__()

    def __reduce_args__(self):
        return self.id, self.results

    def __bool__(self):
        return bool(self.id or self.results)
    __nonzero__ = __bool__  # Included for Py2 backwards compatibility

    def __eq__(self, other):
        if isinstance(other, GroupResult):
            return other.id == self.id and other.results == self.results
        return NotImplemented

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
                                         ', '.join(r.id for r in self.results))

    def as_tuple(self):
        return self.id, [r.as_tuple() for r in self.results]
    serializable = as_tuple  # XXX compat

    @property
    def children(self):
        return self.results

    @classmethod
    def restore(self, id, backend=None):
        """Restore previously saved group result."""
        return (
            backend or (self.app.backend if self.app else current_app.backend)
        ).restore_group(id)


class TaskSetResult(GroupResult):
    """Deprecated version of :class:`GroupResult`"""

    def __init__(self, taskset_id, results=None, **kwargs):
        # XXX supports the taskset_id kwarg.
        # XXX previously the "results" arg was named "subtasks".
        if 'subtasks' in kwargs:
            results = kwargs['subtasks']
        GroupResult.__init__(self, taskset_id, results, **kwargs)

    def itersubtasks(self):
        """Deprecated. Use ``iter(self.results)`` instead."""
        return iter(self.results)

    @property
    def total(self):
        """Deprecated: Use ``len(r)``."""
        return len(self)

    @property
    def taskset_id(self):
        """compat alias to :attr:`self.id`"""
        return self.id

    @taskset_id.setter  # noqa
    def taskset_id(self, id):
        self.id = id


class EagerResult(AsyncResult):
    """Result that we know has already been executed."""
    task_name = None

    def __init__(self, id, ret_value, state, traceback=None):
        self.id = id
        self._result = ret_value
        self._state = state
        self._traceback = traceback

    def _get_task_meta(self):
        return {'task_id': self.id, 'result': self._result, 'status':
                self._state, 'traceback': self._traceback}

    def __reduce__(self):
        return self.__class__, self.__reduce_args__()

    def __reduce_args__(self):
        return (self.id, self._result, self._state, self._traceback)

    def __copy__(self):
        cls, args = self.__reduce__()
        return cls(*args)

    def ready(self):
        return True

    def get(self, timeout=None, propagate=True, **kwargs):
        if self.successful():
            return self.result
        elif self.state in states.PROPAGATE_STATES:
            if propagate:
                raise self.result
            return self.result
    wait = get

    def forget(self):
        pass

    def revoke(self, *args, **kwargs):
        self._state = states.REVOKED

    def __repr__(self):
        return '<EagerResult: {0.id}>'.format(self)

    @property
    def result(self):
        """The task's return value"""
        return self._result

    @property
    def state(self):
        """The task's state."""
        return self._state
    status = state

    @property
    def traceback(self):
        """The traceback if the task failed."""
        return self._traceback

    @property
    def supports_native_join(self):
        return False
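

# Illustrative sketch, not part of the upstream module: EagerResult is what
# ``task.apply()`` returns (and what ``delay()`` returns under
# CELERY_ALWAYS_EAGER).  Constructing one directly shows that it exposes the
# same read API as AsyncResult without touching a broker or result backend.
def _example_eager_result():
    res = EagerResult('example-task-id', 42, states.SUCCESS)
    assert res.ready() and res.successful()
    return res.get()  # -> 42, served from the in-memory state above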


def result_from_tuple(r, app=None):
    # earlier backends may just pickle, so check if
    # result is already prepared.
    app = app_or_default(app)
    Result = app.AsyncResult
    if not isinstance(r, ResultBase):
        res, nodes = r
        if nodes:
            return app.GroupResult(
                res, [result_from_tuple(child, app) for child in nodes],
            )
        # previously did not include parent
        id, parent = res if isinstance(res, (list, tuple)) else (res, None)
        if parent:
            parent = result_from_tuple(parent, app)
        return Result(id, parent=parent)
    return r
from_serializable = result_from_tuple  # XXX compat
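

# Illustrative sketch, not part of the upstream module: as_tuple() and
# result_from_tuple() are inverses, which is how result objects (including
# their parent chain) survive serialization.  Assumes a default Celery app
# can be created; no broker connection is needed just to build result objects.
if __name__ == '__main__':
    parent = AsyncResult('hypothetical-parent-id')
    child = AsyncResult('hypothetical-child-id', parent=parent)
    rebuilt = result_from_tuple(child.as_tuple())
    # Equality compares task ids, so the round-trip preserves identity.
    assert rebuilt == child and rebuilt.parent == parent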