Development of an internal social media platform with personalised dashboards for students

state.py 23KB

# -*- coding: utf-8 -*-
"""
    celery.events.state
    ~~~~~~~~~~~~~~~~~~~

    This module implements a data structure used to keep
    track of the state of a cluster of workers and the tasks
    it is working on (by consuming events).

    For every event consumed the state is updated,
    so the state represents the state of the cluster
    at the time of the last event.

    Snapshots (:mod:`celery.events.snapshot`) can be used to
    take "pictures" of this state at regular intervals
    to e.g. store that in a database.

"""
from __future__ import absolute_import

import bisect
import sys
import threading

from datetime import datetime
from decimal import Decimal
from itertools import islice
from operator import itemgetter
from time import time
from weakref import ref

from kombu.clocks import timetuple
from kombu.utils import cached_property, kwdict

from celery import states
from celery.five import class_property, items, values
from celery.utils import deprecated
from celery.utils.functional import LRUCache, memoize
from celery.utils.log import get_logger

PYPY = hasattr(sys, 'pypy_version_info')

# The window (in percentage) is added to the worker's heartbeat
# frequency. If the time between updates exceeds this window,
# then the worker is considered to be offline.
HEARTBEAT_EXPIRE_WINDOW = 200

# Max drift between event timestamp and time of event received
# before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16

DRIFT_WARNING = """\
Substantial drift from %s may mean clocks are out of sync. Current drift is
%s seconds. [orig: %s recv: %s]
"""

CAN_KWDICT = sys.version_info >= (2, 6, 5)

logger = get_logger(__name__)
warn = logger.warning

R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})'
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'

__all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']


@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
    # we use memoize here so the warning is only logged once per hostname
    warn(DRIFT_WARNING, hostname, drift,
         datetime.fromtimestamp(local_received),
         datetime.fromtimestamp(timestamp))


def heartbeat_expires(timestamp, freq=60,
                      expire_window=HEARTBEAT_EXPIRE_WINDOW,
                      Decimal=Decimal, float=float, isinstance=isinstance):
    # some json implementations return decimal.Decimal objects,
    # which are not compatible with float.
    freq = float(freq) if isinstance(freq, Decimal) else freq
    if isinstance(timestamp, Decimal):
        timestamp = float(timestamp)
    return timestamp + (freq * (expire_window / 1e2))
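

# Worked example (editor's illustration, not part of the original module):
# with the defaults freq=60 and HEARTBEAT_EXPIRE_WINDOW=200 (a percentage),
# a heartbeat stamped at time T expires at T + 60 * (200 / 100) = T + 120
# seconds; a worker whose newest heartbeat is older than that is treated
# as offline (see Worker.alive below).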


def _depickle_task(cls, fields):
    return cls(**(fields if CAN_KWDICT else kwdict(fields)))


def with_unique_field(attr):

    def _decorate_cls(cls):

        def __eq__(this, other):
            if isinstance(other, this.__class__):
                return getattr(this, attr) == getattr(other, attr)
            return NotImplemented
        cls.__eq__ = __eq__

        def __ne__(this, other):
            return not this.__eq__(other)
        cls.__ne__ = __ne__

        def __hash__(this):
            return hash(getattr(this, attr))
        cls.__hash__ = __hash__

        return cls
    return _decorate_cls
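

# Illustration (editor's note, not in the original source): the decorator
# above makes instances compare and hash by a single attribute, e.g. with
# @with_unique_field('hostname') two Worker objects are equal whenever their
# ``hostname`` values match, so a worker can be looked up in sets and dicts
# by hostname alone.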


@with_unique_field('hostname')
class Worker(object):
    """Worker State."""
    heartbeat_max = 4
    expire_window = HEARTBEAT_EXPIRE_WINDOW

    _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
               'active', 'processed', 'loadavg', 'sw_ident',
               'sw_ver', 'sw_sys')
    if not PYPY:
        __slots__ = _fields + ('event', '__dict__', '__weakref__')

    def __init__(self, hostname=None, pid=None, freq=60,
                 heartbeats=None, clock=0, active=None, processed=None,
                 loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
        self.hostname = hostname
        self.pid = pid
        self.freq = freq
        self.heartbeats = [] if heartbeats is None else heartbeats
        self.clock = clock or 0
        self.active = active
        self.processed = processed
        self.loadavg = loadavg
        self.sw_ident = sw_ident
        self.sw_ver = sw_ver
        self.sw_sys = sw_sys
        self.event = self._create_event_handler()

    def __reduce__(self):
        return self.__class__, (self.hostname, self.pid, self.freq,
                                self.heartbeats, self.clock, self.active,
                                self.processed, self.loadavg, self.sw_ident,
                                self.sw_ver, self.sw_sys)

    def _create_event_handler(self):
        _set = object.__setattr__
        hbmax = self.heartbeat_max
        heartbeats = self.heartbeats
        hb_pop = self.heartbeats.pop
        hb_append = self.heartbeats.append

        def event(type_, timestamp=None,
                  local_received=None, fields=None,
                  max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int,
                  insort=bisect.insort, len=len):
            fields = fields or {}
            for k, v in items(fields):
                _set(self, k, v)

            if type_ == 'offline':
                heartbeats[:] = []
            else:
                if not local_received or not timestamp:
                    return
                drift = abs(int(local_received) - int(timestamp))
                if drift > HEARTBEAT_DRIFT_MAX:
                    _warn_drift(self.hostname, drift,
                                local_received, timestamp)
                if local_received:
                    hearts = len(heartbeats)
                    if hearts > hbmax - 1:
                        hb_pop(0)
                    if hearts and local_received > heartbeats[-1]:
                        hb_append(local_received)
                    else:
                        insort(heartbeats, local_received)
        return event

    def update(self, f, **kw):
        for k, v in items(dict(f, **kw) if kw else f):
            setattr(self, k, v)

    def __repr__(self):
        return R_WORKER.format(self)

    @property
    def status_string(self):
        return 'ONLINE' if self.alive else 'OFFLINE'

    @property
    def heartbeat_expires(self):
        return heartbeat_expires(self.heartbeats[-1],
                                 self.freq, self.expire_window)

    @property
    def alive(self, nowfun=time):
        return bool(self.heartbeats and nowfun() < self.heartbeat_expires)

    @property
    def id(self):
        return '{0.hostname}.{0.pid}'.format(self)

    @deprecated(3.2, 3.3)
    def update_heartbeat(self, received, timestamp):
        self.event(None, timestamp, received)

    @deprecated(3.2, 3.3)
    def on_online(self, timestamp=None, local_received=None, **fields):
        self.event('online', timestamp, local_received, fields)

    @deprecated(3.2, 3.3)
    def on_offline(self, timestamp=None, local_received=None, **fields):
        self.event('offline', timestamp, local_received, fields)

    @deprecated(3.2, 3.3)
    def on_heartbeat(self, timestamp=None, local_received=None, **fields):
        self.event('heartbeat', timestamp, local_received, fields)

    @class_property
    def _defaults(cls):
        """Deprecated, to be removed in 3.3"""
        source = cls()
        return dict((k, getattr(source, k)) for k in cls._fields)
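

# Usage sketch (editor's illustration with made-up values, not part of the
# original module):
#
#     w = Worker(hostname='celery@host1', freq=60)
#     w.event('heartbeat', timestamp=time(), local_received=time())
#     w.alive               # True until the heartbeat window expires
#     w.heartbeat_expires   # last heartbeat + freq * (expire_window / 100)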


@with_unique_field('uuid')
class Task(object):
    """Task State."""
    name = received = sent = started = succeeded = failed = retried = \
        revoked = args = kwargs = eta = expires = retries = worker = result = \
        exception = timestamp = runtime = traceback = exchange = \
        routing_key = client = None
    state = states.PENDING
    clock = 0

    _fields = ('uuid', 'name', 'state', 'received', 'sent', 'started',
               'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
               'eta', 'expires', 'retries', 'worker', 'result', 'exception',
               'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
               'clock', 'client')
    if not PYPY:
        __slots__ = ('__dict__', '__weakref__')

    #: How to merge out of order events.
    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
    #: must have happened before a :event:`task-failed` event).
    #:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state. ``(RECEIVED, ('name', 'args'))`` means the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after are simply ignored.
    merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs',
                                     'retries', 'eta', 'expires')}

    #: :meth:`info` displays these fields by default.
    _info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
                    'expires', 'exception', 'exchange', 'routing_key')

    def __init__(self, uuid=None, **kwargs):
        self.uuid = uuid
        if kwargs:
            for k, v in items(kwargs):
                setattr(self, k, v)

    def event(self, type_, timestamp=None, local_received=None, fields=None,
              precedence=states.precedence, items=items, dict=dict,
              PENDING=states.PENDING, RECEIVED=states.RECEIVED,
              STARTED=states.STARTED, FAILURE=states.FAILURE,
              RETRY=states.RETRY, SUCCESS=states.SUCCESS,
              REVOKED=states.REVOKED):
        fields = fields or {}
        if type_ == 'sent':
            state, self.sent = PENDING, timestamp
        elif type_ == 'received':
            state, self.received = RECEIVED, timestamp
        elif type_ == 'started':
            state, self.started = STARTED, timestamp
        elif type_ == 'failed':
            state, self.failed = FAILURE, timestamp
        elif type_ == 'retried':
            state, self.retried = RETRY, timestamp
        elif type_ == 'succeeded':
            state, self.succeeded = SUCCESS, timestamp
        elif type_ == 'revoked':
            state, self.revoked = REVOKED, timestamp
        else:
            state = type_.upper()

        # note that precedence here is reversed
        # see implementation in celery.states.state.__lt__
        if state != RETRY and self.state != RETRY and \
                precedence(state) > precedence(self.state):
            # this state logically happens-before the current state, so merge.
            keep = self.merge_rules.get(state)
            if keep is not None:
                fields = dict(
                    (k, v) for k, v in items(fields) if k in keep
                )
                for key, value in items(fields):
                    setattr(self, key, value)
        else:
            self.state = state
            self.timestamp = timestamp
            for key, value in items(fields):
                setattr(self, key, value)

    def info(self, fields=None, extra=[]):
        """Information about this task suitable for on-screen display."""
        fields = self._info_fields if fields is None else fields

        def _keys():
            for key in list(fields) + list(extra):
                value = getattr(self, key, None)
                if value is not None:
                    yield key, value

        return dict(_keys())

    def __repr__(self):
        return R_TASK.format(self)

    def as_dict(self):
        get = object.__getattribute__
        return dict(
            (k, get(self, k)) for k in self._fields
        )

    def __reduce__(self):
        return _depickle_task, (self.__class__, self.as_dict())

    @property
    def origin(self):
        return self.client if self.worker is None else self.worker.id

    @property
    def ready(self):
        return self.state in states.READY_STATES

    @deprecated(3.2, 3.3)
    def on_sent(self, timestamp=None, **fields):
        self.event('sent', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_received(self, timestamp=None, **fields):
        self.event('received', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_started(self, timestamp=None, **fields):
        self.event('started', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_failed(self, timestamp=None, **fields):
        self.event('failed', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_retried(self, timestamp=None, **fields):
        self.event('retried', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_succeeded(self, timestamp=None, **fields):
        self.event('succeeded', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_revoked(self, timestamp=None, **fields):
        self.event('revoked', timestamp, fields)

    @deprecated(3.2, 3.3)
    def on_unknown_event(self, shortype, timestamp=None, **fields):
        self.event(shortype, timestamp, fields)

    @deprecated(3.2, 3.3)
    def update(self, state, timestamp, fields,
               _state=states.state, RETRY=states.RETRY):
        return self.event(state, timestamp, None, fields)

    @deprecated(3.2, 3.3)
    def merge(self, state, timestamp, fields):
        keep = self.merge_rules.get(state)
        if keep is not None:
            fields = dict((k, v) for k, v in items(fields) if k in keep)
            for key, value in items(fields):
                setattr(self, key, value)

    @class_property
    def _defaults(cls):
        """Deprecated, to be removed in 3.3."""
        source = cls()
        return dict((k, getattr(source, k)) for k in source._fields)
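

# Merge illustration (editor's note, not in the original source): if a
# task-succeeded event arrives before the matching task-received event,
# Task.event() keeps SUCCESS as the current state and, once the late
# 'received' event shows up, copies only the merge_rules fields
# ('name', 'args', 'kwargs', 'retries', 'eta', 'expires') from it rather
# than regressing the state back to RECEIVED.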


class State(object):
    """Records cluster state."""
    Worker = Worker
    Task = Task
    event_count = 0
    task_count = 0
    heap_multiplier = 4

    def __init__(self, callback=None,
                 workers=None, tasks=None, taskheap=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000,
                 on_node_join=None, on_node_leave=None):
        self.event_callback = callback
        self.workers = (LRUCache(max_workers_in_memory)
                        if workers is None else workers)
        self.tasks = (LRUCache(max_tasks_in_memory)
                      if tasks is None else tasks)
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.on_node_join = on_node_join
        self.on_node_leave = on_node_leave
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self.rebuild_taskheap()

    @cached_property
    def _event(self):
        return self._create_dispatcher()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = dict(
                (uuid, task) for uuid, task in self.itertasks()
                if task.state not in states.READY_STATES)
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Return tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(
                hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid)
            return task, True

    def event(self, event):
        with self._mutex:
            return self._event(event)

    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['task', type_])))[0]

    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['worker', type_])))[0]

    def _create_dispatcher(self):
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter('hostname', 'timestamp', 'local_received')
        tfields = itemgetter('uuid', 'hostname', 'timestamp',
                             'local_received', 'clock')
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from task heap is an O(n) operation,
        # so easier to just account for the common number of events
        # for each task (PENDING->RECEIVED->STARTED->final)
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        def _event(event,
                   timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event['type'].partition('-')
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                return handler(subject, event), subject

            if group == 'worker':
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == 'offline'
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == 'online'):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == 'task':
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == 'sent'
                try:
                    task, created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid)
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)

                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)
                origin = hostname if is_client_event else worker.id

                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)

                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)

                if subject == 'received':
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                return (task, created), subject
        return _event

    def rebuild_taskheap(self, timetuple=timetuple):
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heap.sort()

    def itertasks(self, limit=None):
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        seen = set()
        for evtup in islice(reversed(self._taskheap), 0, limit):
            task = evtup[3]()
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time

    def tasks_by_type(self, name, limit=None):
        """Get all tasks by type.

        Return a list of ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.name == name),
            0, limit,
        )

    def tasks_by_worker(self, hostname, limit=None):
        """Get all tasks by worker."""
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time()
             if task.worker.hostname == hostname),
            0, limit,
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
        return self.__class__, (
            self.event_callback, self.workers, self.tasks, None,
            self.max_workers_in_memory, self.max_tasks_in_memory,
            self.on_node_join, self.on_node_leave,
        )
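
Usage example (editor's sketch, not part of state.py): the State class above is typically fed by an event receiver. The snippet below assumes a Celery 3.1-style app and a reachable broker at the assumed URL 'amqp://guest@localhost//'; it follows the standard monitoring pattern of passing every captured event to State.event and reading task details back from state.tasks.

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')  # assumed broker URL


    def monitor(app):
        # State keeps an in-memory picture of the cluster, updated per event.
        state = app.events.State()

        def on_task_failed(event):
            state.event(event)
            # the task name is only sent with task-received, but State
            # remembers it for us once that event has been seen.
            task = state.tasks.get(event['uuid'])
            print('TASK FAILED: {0.name}[{0.uuid}]'.format(task))

        with app.connection() as connection:
            recv = app.events.Receiver(connection, handlers={
                'task-failed': on_task_failed,
                '*': state.event,   # every other event just updates the state
            })
            recv.capture(limit=None, timeout=None, wakeup=True)


    if __name__ == '__main__':
        monitor(app)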