Development of an internal social media platform with personalised dashboards for students

state.py (6.6 KB)

# -*- coding: utf-8 -*-
"""
    celery.worker.state
    ~~~~~~~~~~~~~~~~~~~

    Internal worker state (global).

    This includes the currently active and reserved tasks,
    statistics, and revoked tasks.

"""
from __future__ import absolute_import

import os
import sys
import platform
import shelve
import zlib

from kombu.serialization import pickle, pickle_protocol
from kombu.utils import cached_property

from celery import __version__
from celery.datastructures import LimitedSet
from celery.exceptions import WorkerShutdown, WorkerTerminate
from celery.five import Counter

__all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests',
           'total_count', 'revoked', 'task_reserved', 'maybe_shutdown',
           'task_accepted', 'task_ready', 'task_reserved', 'task_ready',
           'Persistent']
#: Worker software/platform information.
SOFTWARE_INFO = {'sw_ident': 'py-celery',
                 'sw_ver': __version__,
                 'sw_sys': platform.system()}

#: maximum number of revokes to keep in memory.
REVOKES_MAX = 50000

#: how many seconds a revoke will be active before
#: being expired when the max limit has been exceeded.
REVOKE_EXPIRES = 10800

#: set of all reserved :class:`~celery.worker.job.Request`'s.
reserved_requests = set()

#: set of currently active :class:`~celery.worker.job.Request`'s.
active_requests = set()

#: count of tasks accepted by the worker, sorted by type.
total_count = Counter()

#: count of all tasks accepted by the worker.
all_total_count = [0]

#: the list of currently revoked tasks. Persistent if statedb set.
revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES)

#: Update global state when a task has been reserved.
task_reserved = reserved_requests.add

should_stop = False
should_terminate = False
def reset_state():
    reserved_requests.clear()
    active_requests.clear()
    total_count.clear()
    all_total_count[:] = [0]
    revoked.clear()


def maybe_shutdown():
    if should_stop:
        raise WorkerShutdown()
    elif should_terminate:
        raise WorkerTerminate()


def task_accepted(request, _all_total_count=all_total_count):
    """Updates global state when a task has been accepted."""
    active_requests.add(request)
    total_count[request.name] += 1
    all_total_count[0] += 1


def task_ready(request):
    """Updates global state when a task is ready."""
    active_requests.discard(request)
    reserved_requests.discard(request)
C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH')
C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or
                    os.environ.get('CELERY_BENCH_EVERY') or 1000)

if C_BENCH:  # pragma: no cover
    import atexit

    from billiard import current_process
    from celery.five import monotonic
    from celery.utils.debug import memdump, sample_mem

    all_count = 0
    bench_first = None
    bench_start = None
    bench_last = None
    bench_every = C_BENCH_EVERY
    bench_sample = []
    __reserved = task_reserved
    __ready = task_ready

    if current_process()._name == 'MainProcess':
        @atexit.register
        def on_shutdown():
            if bench_first is not None and bench_last is not None:
                print('- Time spent in benchmark: {0!r}'.format(
                    bench_last - bench_first))
                print('- Avg: {0}'.format(
                    sum(bench_sample) / len(bench_sample)))
                memdump()

    def task_reserved(request):  # noqa
        global bench_start
        global bench_first
        now = None
        if bench_start is None:
            bench_start = now = monotonic()
        if bench_first is None:
            bench_first = now
        return __reserved(request)

    def task_ready(request):  # noqa
        global all_count
        global bench_start
        global bench_last
        all_count += 1
        if not all_count % bench_every:
            now = monotonic()
            diff = now - bench_start
            print('- Time spent processing {0} tasks (since first '
                  'task received): ~{1:.4f}s\n'.format(bench_every, diff))
            sys.stdout.flush()
            bench_start = bench_last = now
            bench_sample.append(diff)
            sample_mem()
        return __ready(request)
class Persistent(object):
    """This is the persistent data stored by the worker when
    :option:`--statedb` is enabled.

    It currently only stores revoked task id's.

    """
    storage = shelve
    protocol = pickle_protocol
    compress = zlib.compress
    decompress = zlib.decompress
    _is_open = False

    def __init__(self, state, filename, clock=None):
        self.state = state
        self.filename = filename
        self.clock = clock
        self.merge()

    def open(self):
        return self.storage.open(
            self.filename, protocol=self.protocol, writeback=True,
        )

    def merge(self):
        self._merge_with(self.db)

    def sync(self):
        self._sync_with(self.db)
        self.db.sync()

    def close(self):
        if self._is_open:
            self.db.close()
            self._is_open = False

    def save(self):
        self.sync()
        self.close()

    def _merge_with(self, d):
        self._merge_revoked(d)
        self._merge_clock(d)
        return d

    def _sync_with(self, d):
        self._revoked_tasks.purge()
        d.update(
            __proto__=3,
            zrevoked=self.compress(self._dumps(self._revoked_tasks)),
            clock=self.clock.forward() if self.clock else 0,
        )
        return d

    def _merge_clock(self, d):
        if self.clock:
            d['clock'] = self.clock.adjust(d.get('clock') or 0)

    def _merge_revoked(self, d):
        try:
            self._merge_revoked_v3(d['zrevoked'])
        except KeyError:
            try:
                self._merge_revoked_v2(d.pop('revoked'))
            except KeyError:
                pass
        # purge expired items at boot
        self._revoked_tasks.purge()

    def _merge_revoked_v3(self, zrevoked):
        if zrevoked:
            self._revoked_tasks.update(
                pickle.loads(self.decompress(zrevoked)))

    def _merge_revoked_v2(self, saved):
        if not isinstance(saved, LimitedSet):
            # (pre 3.0.18) used to be stored as a dict
            return self._merge_revoked_v1(saved)
        self._revoked_tasks.update(saved)

    def _merge_revoked_v1(self, saved):
        add = self._revoked_tasks.add
        for item in saved:
            add(item)

    def _dumps(self, obj):
        return pickle.dumps(obj, protocol=self.protocol)

    @property
    def _revoked_tasks(self):
        return self.state.revoked

    @cached_property
    def db(self):
        self._is_open = True
        return self.open()
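
For orientation, below is a minimal sketch of how the module-level helpers and the Persistent class above can be exercised. It assumes a Celery 3.x environment where this file lives at celery.worker.state; the FakeRequest class and the worker.statedb filename are illustrative stand-ins introduced for this example only, not names defined in the file.

    # Illustrative sketch, not part of celery.worker.state itself.
    # Assumes Celery 3.x is installed; FakeRequest and 'worker.statedb'
    # are example names chosen here.
    from celery.worker import state


    class FakeRequest(object):
        """Stand-in with the attributes task_accepted() actually touches."""
        def __init__(self, name, id):
            self.name = name
            self.id = id


    req = FakeRequest('tasks.add', 'task-id-1')

    state.task_reserved(req)       # request enters reserved_requests
    state.task_accepted(req)       # enters active_requests, counters bump
    state.task_ready(req)          # removed from both sets when finished

    print(state.total_count)       # Counter({'tasks.add': 1})
    print(state.all_total_count)   # [1]

    # maybe_shutdown() only raises once should_stop / should_terminate
    # have been flipped (normally by the worker's signal handlers).
    state.maybe_shutdown()         # no-op while both flags are False

    # Persistent snapshots the revoked-task set across restarts when
    # --statedb is enabled; the filename here is just an example.
    p = state.Persistent(state, 'worker.statedb')
    state.revoked.add('some-revoked-task-id')
    p.save()                       # sync() + close(): writes zrevoked to the shelve

Keeping all of this state at module level lets the pool callbacks and the Persistent snapshot share one set of counters without threading a state object through every call; the trade-off is that tests have to call reset_state() to start from a clean slate.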